#include "SkXfermode.h"
#include "SkXfermode_proccoeff.h"
#include "SkColorPriv.h"

#include <arm_neon.h>
#include "SkColor_opts_neon.h"
#include "SkXfermode_opts_arm_neon.h"

#define SkAlphaMulAlpha(a, b)   SkMulDiv255Round(a, b)
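// SkMulDiv255Round computes (a*b)/255 with rounding, using the exact
// identity round(x/255) == (x + 128 + ((x + 128) >> 8)) >> 8 for
// 0 <= x <= 255*255. Worked example: a = b = 255 gives x = 65025;
// 65025 + 128 = 65153; 65153 + (65153 >> 8) = 65153 + 254 = 65407;
// 65407 >> 8 = 255, as expected.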
////////////////////////////////////////////////////////////////////////////////
// NEONized skia functions
////////////////////////////////////////////////////////////////////////////////

static inline uint8x8_t SkAlphaMulAlpha_neon8(uint8x8_t color, uint8x8_t alpha) {
    uint16x8_t tmp;
    uint8x8_t ret;

    tmp = vmull_u8(color, alpha);
    tmp = vaddq_u16(tmp, vdupq_n_u16(128));
    tmp = vaddq_u16(tmp, vshrq_n_u16(tmp, 8));

    ret = vshrn_n_u16(tmp, 8);

    return ret;
}
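// Per lane, SkAlphaMulAlpha_neon8 mirrors the scalar SkMulDiv255Round:
// widen with vmull_u8, add the 128 bias, fold the high byte back in, then
// vshrn_n_u16 performs the final >> 8 and the 16-to-8-bit narrowing in one
// instruction. The *_16 variant below keeps the 16-bit lanes unnarrowed
// for callers that continue accumulating.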
static inline uint16x8_t SkAlphaMulAlpha_neon8_16(uint8x8_t color, uint8x8_t alpha) {
    uint16x8_t ret;

    ret = vmull_u8(color, alpha);
    ret = vaddq_u16(ret, vdupq_n_u16(128));
    ret = vaddq_u16(ret, vshrq_n_u16(ret, 8));

    ret = vshrq_n_u16(ret, 8);

    return ret;
}
static inline uint8x8_t SkDiv255Round_neon8_32_8(int32x4_t p1, int32x4_t p2) {
    uint16x8_t tmp;

    tmp = vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(p1)),
                       vmovn_u32(vreinterpretq_u32_s32(p2)));

    tmp += vdupq_n_u16(128);
    tmp += vshrq_n_u16(tmp, 8);

    return vshrn_n_u16(tmp, 8);
}

static inline uint16x8_t SkDiv255Round_neon8_16_16(uint16x8_t prod) {
    prod += vdupq_n_u16(128);
    prod += vshrq_n_u16(prod, 8);

    return vshrq_n_u16(prod, 8);
}
static inline uint8x8_t clamp_div255round_simd8_32(int32x4_t val1, int32x4_t val2) {
    uint8x8_t ret;
    uint32x4_t cmp1, cmp2;
    uint16x8_t cmp16;
    uint8x8_t cmp8, cmp8_1;

    // Test if <= 0
    cmp1 = vcleq_s32(val1, vdupq_n_s32(0));
    cmp2 = vcleq_s32(val2, vdupq_n_s32(0));
    cmp16 = vcombine_u16(vmovn_u32(cmp1), vmovn_u32(cmp2));
    cmp8_1 = vmovn_u16(cmp16);

    // Init to zero
    ret = vdup_n_u8(0);

    // Test if >= 255*255
    cmp1 = vcgeq_s32(val1, vdupq_n_s32(255*255));
    cmp2 = vcgeq_s32(val2, vdupq_n_s32(255*255));
    cmp16 = vcombine_u16(vmovn_u32(cmp1), vmovn_u32(cmp2));
    cmp8 = vmovn_u16(cmp16);

    // Insert 255 where true
    ret = vbsl_u8(cmp8, vdup_n_u8(255), ret);

    // Calc SkDiv255Round
    uint8x8_t div = SkDiv255Round_neon8_32_8(val1, val2);

    // Keep the clamped 0/255 where either test fired; otherwise insert the
    // rounded division result
    cmp8 = cmp8 | cmp8_1;
    ret = vbsl_u8(cmp8, ret, div);

    return ret;
}
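// Scalar sketch of the lanewise selection above (hypothetical reference
// helper, not part of this file):
//
//   static inline uint8_t clamp_div255round(int32_t val) {
//       if (val <= 0)          return 0;            // low clamp
//       if (val >= 255*255)    return 255;          // high clamp
//       return SkDiv255Round(val);                  // rounded /255
//   }
//
// The vector code builds both comparison masks first, then uses vbsl_u8 to
// pick 0, 255 or the rounded quotient per lane.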
////////////////////////////////////////////////////////////////////////////////
// 1 pixel modeprocs
////////////////////////////////////////////////////////////////////////////////

// kSrcATop_Mode,  //!< [Da, Sc * Da + (1 - Sa) * Dc]
SkPMColor srcatop_modeproc_neon(SkPMColor src, SkPMColor dst) {
    unsigned sa = SkGetPackedA32(src);
    unsigned da = SkGetPackedA32(dst);
    unsigned isa = 255 - sa;

    uint8x8_t vda, visa, vsrc, vdst;

    vda = vdup_n_u8(da);
    visa = vdup_n_u8(isa);

    uint16x8_t vsrc_wide, vdst_wide;
    vsrc_wide = vmull_u8(vda, vreinterpret_u8_u32(vdup_n_u32(src)));
    vdst_wide = vmull_u8(visa, vreinterpret_u8_u32(vdup_n_u32(dst)));

    vsrc_wide += vdupq_n_u16(128);
    vsrc_wide += vshrq_n_u16(vsrc_wide, 8);

    vdst_wide += vdupq_n_u16(128);
    vdst_wide += vshrq_n_u16(vdst_wide, 8);

    vsrc = vshrn_n_u16(vsrc_wide, 8);
    vdst = vshrn_n_u16(vdst_wide, 8);

    vsrc += vdst;
    vsrc = vset_lane_u8(da, vsrc, 3);

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}
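// Note on the single-pixel procs: vdup_n_u32 replicates the packed pixel so
// that one vmull_u8 scales all four channels at once. The alpha lane of that
// blend would only be approximately right, since the two terms are rounded
// separately, so vset_lane_u8 overwrites lane 3 (assuming the usual
// SK_A32_SHIFT == 24 layout) with the exact result alpha, here Da.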
// kDstATop_Mode,  //!< [Sa, Sa * Dc + Sc * (1 - Da)]
SkPMColor dstatop_modeproc_neon(SkPMColor src, SkPMColor dst) {
    unsigned sa = SkGetPackedA32(src);
    unsigned da = SkGetPackedA32(dst);
    unsigned ida = 255 - da;

    uint8x8_t vsa, vida, vsrc, vdst;

    vsa = vdup_n_u8(sa);
    vida = vdup_n_u8(ida);

    uint16x8_t vsrc_wide, vdst_wide;
    vsrc_wide = vmull_u8(vida, vreinterpret_u8_u32(vdup_n_u32(src)));
    vdst_wide = vmull_u8(vsa, vreinterpret_u8_u32(vdup_n_u32(dst)));

    vsrc_wide += vdupq_n_u16(128);
    vsrc_wide += vshrq_n_u16(vsrc_wide, 8);

    vdst_wide += vdupq_n_u16(128);
    vdst_wide += vshrq_n_u16(vdst_wide, 8);

    vsrc = vshrn_n_u16(vsrc_wide, 8);
    vdst = vshrn_n_u16(vdst_wide, 8);

    vsrc += vdst;
    vsrc = vset_lane_u8(sa, vsrc, 3);

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}

// kXor_Mode  //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
SkPMColor xor_modeproc_neon(SkPMColor src, SkPMColor dst) {
    unsigned sa = SkGetPackedA32(src);
    unsigned da = SkGetPackedA32(dst);
    unsigned ret_alpha = sa + da - (SkAlphaMulAlpha(sa, da) << 1);
    unsigned isa = 255 - sa;
    unsigned ida = 255 - da;

    uint8x8_t vsrc, vdst, visa, vida;
    uint16x8_t vsrc_wide, vdst_wide;

    visa = vdup_n_u8(isa);
    vida = vdup_n_u8(ida);
    vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
    vdst = vreinterpret_u8_u32(vdup_n_u32(dst));

    vsrc_wide = vmull_u8(vsrc, vida);
    vdst_wide = vmull_u8(vdst, visa);

    vsrc_wide += vdupq_n_u16(128);
    vsrc_wide += vshrq_n_u16(vsrc_wide, 8);

    vdst_wide += vdupq_n_u16(128);
    vdst_wide += vshrq_n_u16(vdst_wide, 8);

    vsrc = vshrn_n_u16(vsrc_wide, 8);
    vdst = vshrn_n_u16(vdst_wide, 8);

    vsrc += vdst;

    vsrc = vset_lane_u8(ret_alpha, vsrc, 3);

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}
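// The XOR result alpha is computed in scalar code above:
//   ra = sa + da - 2 * SkAlphaMulAlpha(sa, da)
// e.g. sa = da = 255 gives ra = 255 + 255 - 2*255 = 0 (two opaque layers
// cancel), while sa = 255, da = 0 gives ra = 255.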
// kPlus_Mode
SkPMColor plus_modeproc_neon(SkPMColor src, SkPMColor dst) {
    uint8x8_t vsrc, vdst;
    vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
    vdst = vreinterpret_u8_u32(vdup_n_u32(dst));
    vsrc = vqadd_u8(vsrc, vdst);

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}

// kModulate_Mode
SkPMColor modulate_modeproc_neon(SkPMColor src, SkPMColor dst) {
    uint8x8_t vsrc, vdst, vres;
    uint16x8_t vres_wide;

    vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
    vdst = vreinterpret_u8_u32(vdup_n_u32(dst));

    vres_wide = vmull_u8(vsrc, vdst);

    vres_wide += vdupq_n_u16(128);
    vres_wide += vshrq_n_u16(vres_wide, 8);

    vres = vshrn_n_u16(vres_wide, 8);

    return vget_lane_u32(vreinterpret_u32_u8(vres), 0);
}
////////////////////////////////////////////////////////////////////////////////
// 8 pixels modeprocs
////////////////////////////////////////////////////////////////////////////////

uint8x8x4_t dstover_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t src_scale;

    src_scale = vsubw_u8(vdupq_n_u16(256), dst.val[NEON_A]);

    ret.val[NEON_A] = dst.val[NEON_A] + SkAlphaMul_neon8(src.val[NEON_A], src_scale);
    ret.val[NEON_R] = dst.val[NEON_R] + SkAlphaMul_neon8(src.val[NEON_R], src_scale);
    ret.val[NEON_G] = dst.val[NEON_G] + SkAlphaMul_neon8(src.val[NEON_G], src_scale);
    ret.val[NEON_B] = dst.val[NEON_B] + SkAlphaMul_neon8(src.val[NEON_B], src_scale);

    return ret;
}
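// DstOver per channel is D + SkAlphaMul(S, 256 - Da): the same 256-based
// scale trick the scalar SkAlphaMul uses, so the divide becomes a plain
// >> 8 with no rounding-bias step.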
uint8x8x4_t srcin_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale;

    scale = SkAlpha255To256_neon8(dst.val[NEON_A]);

    ret.val[NEON_A] = SkAlphaMul_neon8(src.val[NEON_A], scale);
    ret.val[NEON_R] = SkAlphaMul_neon8(src.val[NEON_R], scale);
    ret.val[NEON_G] = SkAlphaMul_neon8(src.val[NEON_G], scale);
    ret.val[NEON_B] = SkAlphaMul_neon8(src.val[NEON_B], scale);

    return ret;
}

uint8x8x4_t dstin_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale;

    scale = SkAlpha255To256_neon8(src.val[NEON_A]);

    ret = SkAlphaMulQ_neon8(dst, scale);

    return ret;
}

uint8x8x4_t srcout_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale = vsubw_u8(vdupq_n_u16(256), dst.val[NEON_A]);

    ret = SkAlphaMulQ_neon8(src, scale);

    return ret;
}

uint8x8x4_t dstout_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale = vsubw_u8(vdupq_n_u16(256), src.val[NEON_A]);

    ret = SkAlphaMulQ_neon8(dst, scale);

    return ret;
}

uint8x8x4_t srcatop_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint8x8_t isa;

    isa = vsub_u8(vdup_n_u8(255), src.val[NEON_A]);

    ret.val[NEON_A] = dst.val[NEON_A];
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], dst.val[NEON_A])
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_R], isa);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], dst.val[NEON_A])
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_G], isa);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], dst.val[NEON_A])
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_B], isa);

    return ret;
}

uint8x8x4_t dstatop_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint8x8_t ida;

    ida = vsub_u8(vdup_n_u8(255), dst.val[NEON_A]);

    ret.val[NEON_A] = src.val[NEON_A];
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_R], src.val[NEON_A]);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_G], src.val[NEON_A]);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_B], src.val[NEON_A]);

    return ret;
}

uint8x8x4_t xor_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint8x8_t isa, ida;
    uint16x8_t tmp_wide, tmp_wide2;

    isa = vsub_u8(vdup_n_u8(255), src.val[NEON_A]);
    ida = vsub_u8(vdup_n_u8(255), dst.val[NEON_A]);

    // First calc alpha
    tmp_wide = vmovl_u8(src.val[NEON_A]);
    tmp_wide = vaddw_u8(tmp_wide, dst.val[NEON_A]);
    tmp_wide2 = vshll_n_u8(SkAlphaMulAlpha_neon8(src.val[NEON_A], dst.val[NEON_A]), 1);
    tmp_wide = vsubq_u16(tmp_wide, tmp_wide2);
    ret.val[NEON_A] = vmovn_u16(tmp_wide);

    // Then colors
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_R], isa);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_G], isa);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_B], isa);

    return ret;
}

uint8x8x4_t plus_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = vqadd_u8(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = vqadd_u8(src.val[NEON_R], dst.val[NEON_R]);
    ret.val[NEON_G] = vqadd_u8(src.val[NEON_G], dst.val[NEON_G]);
    ret.val[NEON_B] = vqadd_u8(src.val[NEON_B], dst.val[NEON_B]);

    return ret;
}

uint8x8x4_t modulate_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = SkAlphaMulAlpha_neon8(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], dst.val[NEON_R]);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], dst.val[NEON_G]);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], dst.val[NEON_B]);

    return ret;
}

static inline uint8x8_t srcover_color(uint8x8_t a, uint8x8_t b) {
    uint16x8_t tmp;

    tmp = vaddl_u8(a, b);
    tmp -= SkAlphaMulAlpha_neon8_16(a, b);

    return vmovn_u16(tmp);
}
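// srcover_color computes a + b - a*b/255. It doubles as the result-alpha
// rule ra = sa + da - sa*da/255 and as the per-channel screen formula,
// which is why the modeprocs below feed their alpha lanes through it and
// screen_modeproc_neon8 uses it for the color lanes too.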
uint8x8x4_t screen_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = srcover_color(src.val[NEON_R], dst.val[NEON_R]);
    ret.val[NEON_G] = srcover_color(src.val[NEON_G], dst.val[NEON_G]);
    ret.val[NEON_B] = srcover_color(src.val[NEON_B], dst.val[NEON_B]);

    return ret;
}

template <bool overlay>
static inline uint8x8_t overlay_hardlight_color(uint8x8_t sc, uint8x8_t dc,
                                                uint8x8_t sa, uint8x8_t da) {
    /*
     * In the end we use (rc + tmp), where rc comes from one of two
     * branches. The whole value (rc + tmp) can always be expressed as
     *   VAL = COM - SUB            in the if case
     *   VAL = COM + SUB - sa*da    in the else case
     *
     * with COM = 255 * (sc + dc)
     * and  SUB = sc*da + dc*sa - 2*dc*sc
     */

    // Prepare common subexpressions
    uint16x8_t const255 = vdupq_n_u16(255);
    uint16x8_t sc_plus_dc = vaddl_u8(sc, dc);
    uint16x8_t scda = vmull_u8(sc, da);
    uint16x8_t dcsa = vmull_u8(dc, sa);
    uint16x8_t sada = vmull_u8(sa, da);

    // Prepare non common subexpressions
    uint16x8_t dc2, sc2;
    uint32x4_t scdc2_1, scdc2_2;
    if (overlay) {
        dc2 = vshll_n_u8(dc, 1);
        scdc2_1 = vmull_u16(vget_low_u16(dc2), vget_low_u16(vmovl_u8(sc)));
        scdc2_2 = vmull_u16(vget_high_u16(dc2), vget_high_u16(vmovl_u8(sc)));
    } else {
        sc2 = vshll_n_u8(sc, 1);
        scdc2_1 = vmull_u16(vget_low_u16(sc2), vget_low_u16(vmovl_u8(dc)));
        scdc2_2 = vmull_u16(vget_high_u16(sc2), vget_high_u16(vmovl_u8(dc)));
    }

    // Calc COM
    int32x4_t com1, com2;
    com1 = vreinterpretq_s32_u32(
            vmull_u16(vget_low_u16(const255), vget_low_u16(sc_plus_dc)));
    com2 = vreinterpretq_s32_u32(
            vmull_u16(vget_high_u16(const255), vget_high_u16(sc_plus_dc)));

    // Calc SUB
    int32x4_t sub1, sub2;
    sub1 = vreinterpretq_s32_u32(vaddl_u16(vget_low_u16(scda), vget_low_u16(dcsa)));
    sub2 = vreinterpretq_s32_u32(vaddl_u16(vget_high_u16(scda), vget_high_u16(dcsa)));
    sub1 = vsubq_s32(sub1, vreinterpretq_s32_u32(scdc2_1));
    sub2 = vsubq_s32(sub2, vreinterpretq_s32_u32(scdc2_2));

    // Compare 2*dc <= da (overlay) or 2*sc <= sa (hardlight)
    uint16x8_t cmp;

    if (overlay) {
        cmp = vcleq_u16(dc2, vmovl_u8(da));
    } else {
        cmp = vcleq_u16(sc2, vmovl_u8(sa));
    }

    // Prepare variables
    int32x4_t val1_1, val1_2;
    int32x4_t val2_1, val2_2;
    uint32x4_t cmp1, cmp2;

    cmp1 = vmovl_u16(vget_low_u16(cmp));
    cmp1 |= vshlq_n_u32(cmp1, 16);
    cmp2 = vmovl_u16(vget_high_u16(cmp));
    cmp2 |= vshlq_n_u32(cmp2, 16);

    // Calc COM - SUB
    val1_1 = com1 - sub1;
    val1_2 = com2 - sub2;

    // Calc COM + SUB - sa*da
    val2_1 = com1 + sub1;
    val2_2 = com2 + sub2;

    val2_1 = vsubq_s32(val2_1, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(sada))));
    val2_2 = vsubq_s32(val2_2, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(sada))));

    // Insert where needed
    val1_1 = vbslq_s32(cmp1, val1_1, val2_1);
    val1_2 = vbslq_s32(cmp2, val1_2, val2_2);

    // Call the clamp_div255round function
    return clamp_div255round_simd8_32(val1_1, val1_2);
}
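// Scalar equivalent of one lane, following the COM/SUB derivation in the
// comment above (hypothetical reference, overlay shown):
//
//   int32_t com = 255 * (sc + dc);
//   int32_t sub = sc*da + dc*sa - 2*dc*sc;
//   int32_t val = (2*dc <= da) ? com - sub            // if case
//                              : com + sub - sa*da;   // else case
//   uint8_t r   = clamp_div255round(val);
//
// Hardlight runs the same arithmetic with the branch test taken from the
// source side, 2*sc <= sa, selected by the template flag.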
static inline uint8x8_t overlay_color(uint8x8_t sc, uint8x8_t dc,
                                      uint8x8_t sa, uint8x8_t da) {
    return overlay_hardlight_color<true>(sc, dc, sa, da);
}

uint8x8x4_t overlay_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = overlay_color(src.val[NEON_R], dst.val[NEON_R],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = overlay_color(src.val[NEON_G], dst.val[NEON_G],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = overlay_color(src.val[NEON_B], dst.val[NEON_B],
                                    src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

template <bool lighten>
static inline uint8x8_t lighten_darken_color(uint8x8_t sc, uint8x8_t dc,
                                             uint8x8_t sa, uint8x8_t da) {
    uint16x8_t sd, ds, cmp, tmp, tmp2;

    // Prepare
    sd = vmull_u8(sc, da);
    ds = vmull_u8(dc, sa);

    // Pick the branch per lane
    if (lighten) {
        cmp = vcgtq_u16(sd, ds);
    } else {
        cmp = vcltq_u16(sd, ds);
    }

    // Compute the if-branch value
    tmp = vaddl_u8(sc, dc);
    tmp2 = tmp;
    tmp -= SkDiv255Round_neon8_16_16(ds);

    // Compute the else-branch value
    tmp2 -= SkDiv255Round_neon8_16_16(sd);

    // Insert where needed
    tmp = vbslq_u16(cmp, tmp, tmp2);

    return vmovn_u16(tmp);
}
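// Lighten keeps, per channel, whichever of sc*da and dc*sa is larger:
//   if (sc*da > dc*sa)  r = sc + dc - dc*sa/255
//   else                r = sc + dc - sc*da/255
// Darken is the same with the comparison reversed. Both candidates are
// computed unconditionally and merged branch-free with vbslq_u16.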
static inline uint8x8_t darken_color(uint8x8_t sc, uint8x8_t dc,
                                     uint8x8_t sa, uint8x8_t da) {
    return lighten_darken_color<false>(sc, dc, sa, da);
}

uint8x8x4_t darken_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = darken_color(src.val[NEON_R], dst.val[NEON_R],
                                   src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = darken_color(src.val[NEON_G], dst.val[NEON_G],
                                   src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = darken_color(src.val[NEON_B], dst.val[NEON_B],
                                   src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

static inline uint8x8_t lighten_color(uint8x8_t sc, uint8x8_t dc,
                                      uint8x8_t sa, uint8x8_t da) {
    return lighten_darken_color<true>(sc, dc, sa, da);
}

uint8x8x4_t lighten_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = lighten_color(src.val[NEON_R], dst.val[NEON_R],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = lighten_color(src.val[NEON_G], dst.val[NEON_G],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = lighten_color(src.val[NEON_B], dst.val[NEON_B],
                                    src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

static inline uint8x8_t hardlight_color(uint8x8_t sc, uint8x8_t dc,
                                        uint8x8_t sa, uint8x8_t da) {
    return overlay_hardlight_color<false>(sc, dc, sa, da);
}

uint8x8x4_t hardlight_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = hardlight_color(src.val[NEON_R], dst.val[NEON_R],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = hardlight_color(src.val[NEON_G], dst.val[NEON_G],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = hardlight_color(src.val[NEON_B], dst.val[NEON_B],
                                      src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

static inline uint8x8_t difference_color(uint8x8_t sc, uint8x8_t dc,
                                         uint8x8_t sa, uint8x8_t da) {
    uint16x8_t sd, ds, tmp;
    int16x8_t val;

    sd = vmull_u8(sc, da);
    ds = vmull_u8(dc, sa);

    tmp = vminq_u16(sd, ds);
    tmp = SkDiv255Round_neon8_16_16(tmp);
    tmp = vshlq_n_u16(tmp, 1);

    val = vreinterpretq_s16_u16(vaddl_u8(sc, dc));

    val -= vreinterpretq_s16_u16(tmp);

    val = vmaxq_s16(val, vdupq_n_s16(0));
    val = vminq_s16(val, vdupq_n_s16(255));

    return vmovn_u16(vreinterpretq_u16_s16(val));
}
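// Difference per channel: sc + dc - 2*min(sc*da, dc*sa)/255. The math runs
// in signed 16-bit lanes and the vmaxq_s16/vminq_s16 pair pins the result
// into [0, 255] before narrowing back to bytes.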
uint8x8x4_t difference_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = difference_color(src.val[NEON_R], dst.val[NEON_R],
                                       src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = difference_color(src.val[NEON_G], dst.val[NEON_G],
                                       src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = difference_color(src.val[NEON_B], dst.val[NEON_B],
                                       src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

static inline uint8x8_t exclusion_color(uint8x8_t sc, uint8x8_t dc,
                                        uint8x8_t sa, uint8x8_t da) {
    /* The equation can be simplified to 255(sc + dc) - 2 * sc * dc */

    uint16x8_t sc_plus_dc, scdc, const255;
    int32x4_t term1_1, term1_2, term2_1, term2_2;

    /* Calc (sc + dc) and (sc * dc) */
    sc_plus_dc = vaddl_u8(sc, dc);
    scdc = vmull_u8(sc, dc);

    /* Prepare constants */
    const255 = vdupq_n_u16(255);

    /* Calc the first term */
    term1_1 = vreinterpretq_s32_u32(
            vmull_u16(vget_low_u16(const255), vget_low_u16(sc_plus_dc)));
    term1_2 = vreinterpretq_s32_u32(
            vmull_u16(vget_high_u16(const255), vget_high_u16(sc_plus_dc)));

    /* Calc the second term */
    term2_1 = vreinterpretq_s32_u32(vshll_n_u16(vget_low_u16(scdc), 1));
    term2_2 = vreinterpretq_s32_u32(vshll_n_u16(vget_high_u16(scdc), 1));

    return clamp_div255round_simd8_32(term1_1 - term2_1, term1_2 - term2_2);
}
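// Where the simplification comes from: the general exclusion term is
//   sc*da + dc*sa - 2*sc*dc + sc*(255 - da) + dc*(255 - sa)
// and the da/sa products cancel, leaving 255*(sc + dc) - 2*sc*dc. The two
// 32-bit halves then go through the same clamp_div255round helper as
// overlay and hardlight.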
uint8x8x4_t exclusion_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = exclusion_color(src.val[NEON_R], dst.val[NEON_R],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = exclusion_color(src.val[NEON_G], dst.val[NEON_G],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = exclusion_color(src.val[NEON_B], dst.val[NEON_B],
                                      src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

static inline uint8x8_t blendfunc_multiply_color(uint8x8_t sc, uint8x8_t dc,
                                                 uint8x8_t sa, uint8x8_t da) {
    uint32x4_t val1, val2;
    uint16x8_t scdc, t1, t2;

    t1 = vmull_u8(sc, vdup_n_u8(255) - da);
    t2 = vmull_u8(dc, vdup_n_u8(255) - sa);
    scdc = vmull_u8(sc, dc);

    val1 = vaddl_u16(vget_low_u16(t1), vget_low_u16(t2));
    val2 = vaddl_u16(vget_high_u16(t1), vget_high_u16(t2));

    val1 = vaddw_u16(val1, vget_low_u16(scdc));
    val2 = vaddw_u16(val2, vget_high_u16(scdc));

    return clamp_div255round_simd8_32(
            vreinterpretq_s32_u32(val1), vreinterpretq_s32_u32(val2));
}

uint8x8x4_t multiply_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = blendfunc_multiply_color(src.val[NEON_R], dst.val[NEON_R],
                                               src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = blendfunc_multiply_color(src.val[NEON_G], dst.val[NEON_G],
                                               src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = blendfunc_multiply_color(src.val[NEON_B], dst.val[NEON_B],
                                               src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}

////////////////////////////////////////////////////////////////////////////////

typedef uint8x8x4_t (*SkXfermodeProcSIMD)(uint8x8x4_t src, uint8x8x4_t dst);

extern SkXfermodeProcSIMD gNEONXfermodeProcs[];

SkNEONProcCoeffXfermode::SkNEONProcCoeffXfermode(SkReadBuffer& buffer)
        : INHERITED(buffer) {
    fProcSIMD = reinterpret_cast<void*>(gNEONXfermodeProcs[this->getMode()]);
}

void SkNEONProcCoeffXfermode::xfer32(SkPMColor dst[], const SkPMColor src[],
                                     int count, const SkAlpha aa[]) const {
    SkASSERT(dst && src && count >= 0);

    SkXfermodeProc proc = this->getProc();
    SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
    SkASSERT(procSIMD != NULL);

    if (NULL == aa) {
        // Unrolled NEON code
        while (count >= 8) {
            uint8x8x4_t vsrc, vdst, vres;
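            // Both #if branches below perform an interleaved vld4.u8 load of
            // 8 pixels, de-interleaving the channels into the four lanes of a
            // uint8x8x4_t. Inline asm is presumably used because the vld4_u8
            // intrinsic generated poor code on the compilers of the day; the
            // GCC <= 4.6 branch additionally pins the results to d0-d7 by hand.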
#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
            asm volatile (
                "vld4.u8    %h[vsrc], [%[src]]!  \t\n"
                "vld4.u8    %h[vdst], [%[dst]]   \t\n"
                : [vsrc] "=w" (vsrc), [vdst] "=w" (vdst), [src] "+&r" (src)
                : [dst] "r" (dst)
                :
            );
#else
            register uint8x8_t d0 asm("d0");
            register uint8x8_t d1 asm("d1");
            register uint8x8_t d2 asm("d2");
            register uint8x8_t d3 asm("d3");
            register uint8x8_t d4 asm("d4");
            register uint8x8_t d5 asm("d5");
            register uint8x8_t d6 asm("d6");
            register uint8x8_t d7 asm("d7");

            asm volatile (
                "vld4.u8    {d0-d3},[%[src]]!;"
                "vld4.u8    {d4-d7},[%[dst]];"
                : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
                  "=w" (d4), "=w" (d5), "=w" (d6), "=w" (d7),
                  [src] "+&r" (src)
                : [dst] "r" (dst)
                :
            );
            vsrc.val[0] = d0; vdst.val[0] = d4;
            vsrc.val[1] = d1; vdst.val[1] = d5;
            vsrc.val[2] = d2; vdst.val[2] = d6;
            vsrc.val[3] = d3; vdst.val[3] = d7;
#endif

            vres = procSIMD(vsrc, vdst);

            vst4_u8((uint8_t*)dst, vres);

            count -= 8;
            dst += 8;
        }
        // Leftovers
        for (int i = 0; i < count; i++) {
            dst[i] = proc(src[i], dst[i]);
        }
    } else {
        for (int i = count - 1; i >= 0; --i) {
            unsigned a = aa[i];
            if (0 != a) {
                SkPMColor dstC = dst[i];
                SkPMColor C = proc(src[i], dstC);
                if (a != 0xFF) {
                    C = SkFourByteInterp_neon(C, dstC, a);
                }
                dst[i] = C;
            }
        }
    }
}
void SkNEONProcCoeffXfermode::xfer16(uint16_t* SK_RESTRICT dst,
                                     const SkPMColor* SK_RESTRICT src, int count,
                                     const SkAlpha* SK_RESTRICT aa) const {
    SkASSERT(dst && src && count >= 0);

    SkXfermodeProc proc = this->getProc();
    SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
    SkASSERT(procSIMD != NULL);

    if (NULL == aa) {
        while (count >= 8) {
            uint16x8_t vdst, vres16;
            uint8x8x4_t vdst32, vsrc, vres;

            vdst = vld1q_u16(dst);

#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
            asm volatile (
                "vld4.u8    %h[vsrc], [%[src]]!  \t\n"
                : [vsrc] "=w" (vsrc), [src] "+&r" (src)
                : :
            );
#else
            register uint8x8_t d0 asm("d0");
            register uint8x8_t d1 asm("d1");
            register uint8x8_t d2 asm("d2");
            register uint8x8_t d3 asm("d3");

            asm volatile (
                "vld4.u8    {d0-d3},[%[src]]!;"
                : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
                  [src] "+&r" (src)
                : :
            );
            vsrc.val[0] = d0;
            vsrc.val[1] = d1;
            vsrc.val[2] = d2;
            vsrc.val[3] = d3;
#endif

            // Expand RGB565 to 8888, blend, then repack
            vdst32 = SkPixel16ToPixel32_neon8(vdst);
            vres = procSIMD(vsrc, vdst32);
            vres16 = SkPixel32ToPixel16_neon8(vres);

            vst1q_u16(dst, vres16);

            count -= 8;
            dst += 8;
        }
        for (int i = 0; i < count; i++) {
            SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
            dst[i] = SkPixel32ToPixel16_ToU16(proc(src[i], dstC));
        }
    } else {
        for (int i = count - 1; i >= 0; --i) {
            unsigned a = aa[i];
            if (0 != a) {
                SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
                SkPMColor C = proc(src[i], dstC);
                if (0xFF != a) {
                    C = SkFourByteInterp_neon(C, dstC, a);
                }
                dst[i] = SkPixel32ToPixel16_ToU16(C);
            }
        }
    }
}
#ifndef SK_IGNORE_TO_STRING
void SkNEONProcCoeffXfermode::toString(SkString* str) const {
    this->INHERITED::toString(str);
}
#endif
////////////////////////////////////////////////////////////////////////////////

SkXfermodeProcSIMD gNEONXfermodeProcs[] = {
    NULL,   // kClear_Mode
    NULL,   // kSrc_Mode
    NULL,   // kDst_Mode
    NULL,   // kSrcOver_Mode
    dstover_modeproc_neon8,
    srcin_modeproc_neon8,
    dstin_modeproc_neon8,
    srcout_modeproc_neon8,
    dstout_modeproc_neon8,
    srcatop_modeproc_neon8,
    dstatop_modeproc_neon8,
    xor_modeproc_neon8,
    plus_modeproc_neon8,
    modulate_modeproc_neon8,
    screen_modeproc_neon8,

    overlay_modeproc_neon8,
    darken_modeproc_neon8,
    lighten_modeproc_neon8,
    NULL,   // kColorDodge_Mode
    NULL,   // kColorBurn_Mode
    hardlight_modeproc_neon8,
    NULL,   // kSoftLight_Mode
    difference_modeproc_neon8,
    exclusion_modeproc_neon8,
    multiply_modeproc_neon8,

    NULL,   // kHue_Mode
    NULL,   // kSaturation_Mode
    NULL,   // kColor_Mode
    NULL,   // kLuminosity_Mode
};

SK_COMPILE_ASSERT(
    SK_ARRAY_COUNT(gNEONXfermodeProcs) == SkXfermode::kLastMode + 1,
    mode_count_arm
);
SkXfermodeProc gNEONXfermodeProcs1[] = {
    NULL,   // kClear_Mode
    NULL,   // kSrc_Mode
    NULL,   // kDst_Mode
    NULL,   // kSrcOver_Mode
    NULL,   // kDstOver_Mode
    NULL,   // kSrcIn_Mode
    NULL,   // kDstIn_Mode
    NULL,   // kSrcOut_Mode
    NULL,   // kDstOut_Mode
    srcatop_modeproc_neon,
    dstatop_modeproc_neon,
    xor_modeproc_neon,
    plus_modeproc_neon,
    modulate_modeproc_neon,
    NULL,   // kScreen_Mode

    NULL,   // kOverlay_Mode
    NULL,   // kDarken_Mode
    NULL,   // kLighten_Mode
    NULL,   // kColorDodge_Mode
    NULL,   // kColorBurn_Mode
    NULL,   // kHardLight_Mode
    NULL,   // kSoftLight_Mode
    NULL,   // kDifference_Mode
    NULL,   // kExclusion_Mode
    NULL,   // kMultiply_Mode

    NULL,   // kHue_Mode
    NULL,   // kSaturation_Mode
    NULL,   // kColor_Mode
    NULL,   // kLuminosity_Mode
};

SK_COMPILE_ASSERT(
    SK_ARRAY_COUNT(gNEONXfermodeProcs1) == SkXfermode::kLastMode + 1,
    mode1_count_arm
);
SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& rec,
                                                         SkXfermode::Mode mode) {
    void* procSIMD = reinterpret_cast<void*>(gNEONXfermodeProcs[mode]);

    if (procSIMD != NULL) {
        return SkNEW_ARGS(SkNEONProcCoeffXfermode, (rec, mode, procSIMD));
    }
    return NULL;
}
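// Sketch of the intended call pattern (the actual wiring lives in Skia's
// platform-dispatch code; the fallback shown here is an assumption): the
// generic factory tries the NEON path first and uses the portable xfermode
// when the mode has no SIMD proc:
//
//   SkProcCoeffXfermode* x = SkPlatformXfermodeFactory_impl_neon(rec, mode);
//   if (NULL == x) {
//       x = SkNEW_ARGS(SkProcCoeffXfermode, (rec, mode));  // portable fallback
//   }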
SkXfermodeProc SkPlatformXfermodeProcFactory_impl_neon(SkXfermode::Mode mode) {
    return gNEONXfermodeProcs1[mode];
}