#include "SkXfermode.h"
#include "SkXfermode_proccoeff.h"
#include "SkColorPriv.h"

#include <arm_neon.h>
#include "SkColor_opts_neon.h"
#include "SkXfermode_opts_arm_neon.h"

#define SkAlphaMulAlpha(a, b)   SkMulDiv255Round(a, b)

////////////////////////////////////////////////////////////////////////////////
// NEONized skia functions
////////////////////////////////////////////////////////////////////////////////
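// All of the helpers below lean on the same trick for dividing a 16-bit
// product by 255 with rounding: for 0 <= x <= 255*255,
//     ((x + 128) + ((x + 128) >> 8)) >> 8
// rounds x/255 to the nearest integer. A scalar sketch of the pattern
// (illustrative only, not part of the build):
//
//     unsigned div255_round(unsigned prod) {  // prod == a * b, a, b in [0, 255]
//         unsigned tmp = prod + 128;          // bias for round-to-nearest
//         return (tmp + (tmp >> 8)) >> 8;     // == SkMulDiv255Round(a, b)
//     }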
static inline uint8x8_t SkAlphaMulAlpha_neon8(uint8x8_t color, uint8x8_t alpha) {
    uint16x8_t tmp;
    uint8x8_t ret;

    tmp = vmull_u8(color, alpha);           // widen: color * alpha, 16 bits/lane
    tmp = vaddq_u16(tmp, vdupq_n_u16(128));
    tmp = vaddq_u16(tmp, vshrq_n_u16(tmp, 8));

    ret = vshrn_n_u16(tmp, 8);              // narrow back to 8 bits/lane

    return ret;
}

// Same computation as above, but keeps the 16-bit intermediates so callers
// can add several terms together before narrowing.
static inline uint16x8_t SkAlphaMulAlpha_neon8_16(uint8x8_t color, uint8x8_t alpha) {
    uint16x8_t ret;

    ret = vmull_u8(color, alpha);
    ret = vaddq_u16(ret, vdupq_n_u16(128));
    ret = vaddq_u16(ret, vshrq_n_u16(ret, 8));

    ret = vshrq_n_u16(ret, 8);

    return ret;
}
// Narrow two 32-bit product vectors into one 16-bit vector, then apply the
// rounding division by 255 and narrow to 8 bits.
static inline uint8x8_t SkDiv255Round_neon8_32_8(int32x4_t p1, int32x4_t p2) {
    uint16x8_t tmp;

    tmp = vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(p1)),
                       vmovn_u32(vreinterpretq_u32_s32(p2)));

    tmp += vdupq_n_u16(128);
    tmp += vshrq_n_u16(tmp, 8);

    return vshrn_n_u16(tmp, 8);
}

static inline uint16x8_t SkDiv255Round_neon8_16_16(uint16x8_t prod) {
    prod += vdupq_n_u16(128);
    prod += vshrq_n_u16(prod, 8);

    return vshrq_n_u16(prod, 8);
}
static inline uint8x8_t clamp_div255round_simd8_32(int32x4_t val1, int32x4_t val2) {
    uint8x8_t ret;
    uint32x4_t cmp1, cmp2;
    uint16x8_t cmp16;
    uint8x8_t cmp8, cmp8_1;

    // Test if <= 0
    cmp1 = vcleq_s32(val1, vdupq_n_s32(0));
    cmp2 = vcleq_s32(val2, vdupq_n_s32(0));
    cmp16 = vcombine_u16(vmovn_u32(cmp1), vmovn_u32(cmp2));
    cmp8_1 = vmovn_u16(cmp16);

    // Init to zero
    ret = vdup_n_u8(0);

    // Test if >= 255*255
    cmp1 = vcgeq_s32(val1, vdupq_n_s32(255*255));
    cmp2 = vcgeq_s32(val2, vdupq_n_s32(255*255));
    cmp16 = vcombine_u16(vmovn_u32(cmp1), vmovn_u32(cmp2));
    cmp8 = vmovn_u16(cmp16);

    // Insert 255 where true
    ret = vbsl_u8(cmp8, vdup_n_u8(255), ret);

    // Calc SkDiv255Round
    uint8x8_t div = SkDiv255Round_neon8_32_8(val1, val2);

    // Where either test fired, keep the clamped value (0 or 255);
    // everywhere else take the rounded division.
    cmp8 = cmp8 | cmp8_1;
    ret = vbsl_u8(cmp8, ret, div);

    return ret;
}
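// Scalar shape of the clamp being vectorized above (a hedged sketch,
// mirroring Skia's scalar clamp_div255round):
//
//     int clamp_div255round(int prod) {
//         if (prod <= 0)          return 0;
//         if (prod >= 255 * 255)  return 255;
//         return SkDiv255Round(prod);
//     }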
////////////////////////////////////////////////////////////////////////////////
// 1-pixel modeprocs
////////////////////////////////////////////////////////////////////////////////
// kSrcATop_Mode,  //!< [Da, Sc * Da + (1 - Sa) * Dc]
SkPMColor srcatop_modeproc_neon(SkPMColor src, SkPMColor dst) {
    unsigned sa = SkGetPackedA32(src);
    unsigned da = SkGetPackedA32(dst);
    unsigned isa = 255 - sa;

    uint8x8_t vda, visa, vsrc, vdst;

    vda = vdup_n_u8(da);
    visa = vdup_n_u8(isa);

    // Splat each packed pixel across a d-register so one widening multiply
    // scales all four channels at once.
    uint16x8_t vsrc_wide, vdst_wide;
    vsrc_wide = vmull_u8(vda, vreinterpret_u8_u32(vdup_n_u32(src)));
    vdst_wide = vmull_u8(visa, vreinterpret_u8_u32(vdup_n_u32(dst)));

    // Rounding division by 255
    vsrc_wide += vdupq_n_u16(128);
    vsrc_wide += vshrq_n_u16(vsrc_wide, 8);

    vdst_wide += vdupq_n_u16(128);
    vdst_wide += vshrq_n_u16(vdst_wide, 8);

    vsrc = vshrn_n_u16(vsrc_wide, 8);
    vdst = vshrn_n_u16(vdst_wide, 8);

    vsrc += vdst;
    vsrc = vset_lane_u8(da, vsrc, 3);   // result alpha is Da

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}
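// The same splat/extract pattern repeats in the procs below:
// vreinterpret_u8_u32(vdup_n_u32(p)) puts the pixel's four bytes in both
// halves of a 64-bit register, vset_lane_u8(a, v, 3) patches the alpha byte,
// and vget_lane_u32(..., 0) pulls the finished SkPMColor back out.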
// kDstATop_Mode,  //!< [Sa, Sa * Dc + Sc * (1 - Da)]
SkPMColor dstatop_modeproc_neon(SkPMColor src, SkPMColor dst) {
    unsigned sa = SkGetPackedA32(src);
    unsigned da = SkGetPackedA32(dst);
    unsigned ida = 255 - da;

    uint8x8_t vsa, vida, vsrc, vdst;

    vsa = vdup_n_u8(sa);
    vida = vdup_n_u8(ida);

    uint16x8_t vsrc_wide, vdst_wide;
    vsrc_wide = vmull_u8(vida, vreinterpret_u8_u32(vdup_n_u32(src)));
    vdst_wide = vmull_u8(vsa, vreinterpret_u8_u32(vdup_n_u32(dst)));

    vsrc_wide += vdupq_n_u16(128);
    vsrc_wide += vshrq_n_u16(vsrc_wide, 8);

    vdst_wide += vdupq_n_u16(128);
    vdst_wide += vshrq_n_u16(vdst_wide, 8);

    vsrc = vshrn_n_u16(vsrc_wide, 8);
    vdst = vshrn_n_u16(vdst_wide, 8);

    vsrc += vdst;
    vsrc = vset_lane_u8(sa, vsrc, 3);

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}
// kXor_Mode   [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
SkPMColor xor_modeproc_neon(SkPMColor src, SkPMColor dst) {
    unsigned sa = SkGetPackedA32(src);
    unsigned da = SkGetPackedA32(dst);
    unsigned ret_alpha = sa + da - (SkAlphaMulAlpha(sa, da) << 1);
    unsigned isa = 255 - sa;
    unsigned ida = 255 - da;

    uint8x8_t vsrc, vdst, visa, vida;
    uint16x8_t vsrc_wide, vdst_wide;

    visa = vdup_n_u8(isa);
    vida = vdup_n_u8(ida);
    vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
    vdst = vreinterpret_u8_u32(vdup_n_u32(dst));

    vsrc_wide = vmull_u8(vsrc, vida);
    vdst_wide = vmull_u8(vdst, visa);

    vsrc_wide += vdupq_n_u16(128);
    vsrc_wide += vshrq_n_u16(vsrc_wide, 8);

    vdst_wide += vdupq_n_u16(128);
    vdst_wide += vshrq_n_u16(vdst_wide, 8);

    vsrc = vshrn_n_u16(vsrc_wide, 8);
    vdst = vshrn_n_u16(vdst_wide, 8);

    vsrc += vdst;

    vsrc = vset_lane_u8(ret_alpha, vsrc, 3);

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}
// kPlus_Mode
SkPMColor plus_modeproc_neon(SkPMColor src, SkPMColor dst) {
    uint8x8_t vsrc, vdst;
    vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
    vdst = vreinterpret_u8_u32(vdup_n_u32(dst));
    vsrc = vqadd_u8(vsrc, vdst);    // saturating add clamps each channel at 255

    return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
}
// kModulate_Mode
SkPMColor modulate_modeproc_neon(SkPMColor src, SkPMColor dst) {
    uint8x8_t vsrc, vdst, vres;
    uint16x8_t vres_wide;

    vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
    vdst = vreinterpret_u8_u32(vdup_n_u32(dst));

    vres_wide = vmull_u8(vsrc, vdst);

    vres_wide += vdupq_n_u16(128);
    vres_wide += vshrq_n_u16(vres_wide, 8);

    vres = vshrn_n_u16(vres_wide, 8);

    return vget_lane_u32(vreinterpret_u32_u8(vres), 0);
}
////////////////////////////////////////////////////////////////////////////////
// 8-pixel modeprocs
////////////////////////////////////////////////////////////////////////////////
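// The 8-pixel procs work on planar data: a vld4.u8 load deinterleaves eight
// SkPMColors so that val[NEON_A] holds the eight alphas, val[NEON_R] the
// eight reds, and so on (the NEON_A/R/G/B indices come from
// SkColor_opts_neon.h). A minimal sketch of the round trip, assuming a
// buffer of at least eight pixels:
//
//     uint8x8x4_t v = vld4_u8((const uint8_t*)pixels);  // deinterleave
//     v.val[NEON_A] = vdup_n_u8(0xFF);                  // e.g. force opaque
//     vst4_u8((uint8_t*)pixels, v);                     // reinterleave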
uint8x8x4_t dstover_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t src_scale;

    // scale = 256 - da: SkAlphaMul_neon8 expects a 0..256 scale
    src_scale = vsubw_u8(vdupq_n_u16(256), dst.val[NEON_A]);

    ret.val[NEON_A] = dst.val[NEON_A] + SkAlphaMul_neon8(src.val[NEON_A], src_scale);
    ret.val[NEON_R] = dst.val[NEON_R] + SkAlphaMul_neon8(src.val[NEON_R], src_scale);
    ret.val[NEON_G] = dst.val[NEON_G] + SkAlphaMul_neon8(src.val[NEON_G], src_scale);
    ret.val[NEON_B] = dst.val[NEON_B] + SkAlphaMul_neon8(src.val[NEON_B], src_scale);

    return ret;
}
uint8x8x4_t srcin_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale;

    scale = SkAlpha255To256_neon8(dst.val[NEON_A]);

    ret.val[NEON_A] = SkAlphaMul_neon8(src.val[NEON_A], scale);
    ret.val[NEON_R] = SkAlphaMul_neon8(src.val[NEON_R], scale);
    ret.val[NEON_G] = SkAlphaMul_neon8(src.val[NEON_G], scale);
    ret.val[NEON_B] = SkAlphaMul_neon8(src.val[NEON_B], scale);

    return ret;
}
uint8x8x4_t dstin_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale;

    scale = SkAlpha255To256_neon8(src.val[NEON_A]);

    ret = SkAlphaMulQ_neon8(dst, scale);

    return ret;
}
uint8x8x4_t srcout_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale = vsubw_u8(vdupq_n_u16(256), dst.val[NEON_A]);

    ret = SkAlphaMulQ_neon8(src, scale);

    return ret;
}
uint8x8x4_t dstout_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint16x8_t scale = vsubw_u8(vdupq_n_u16(256), src.val[NEON_A]);

    ret = SkAlphaMulQ_neon8(dst, scale);

    return ret;
}
uint8x8x4_t srcatop_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint8x8_t isa;

    isa = vsub_u8(vdup_n_u8(255), src.val[NEON_A]);

    ret.val[NEON_A] = dst.val[NEON_A];
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], dst.val[NEON_A])
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_R], isa);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], dst.val[NEON_A])
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_G], isa);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], dst.val[NEON_A])
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_B], isa);

    return ret;
}
uint8x8x4_t dstatop_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint8x8_t ida;

    ida = vsub_u8(vdup_n_u8(255), dst.val[NEON_A]);

    ret.val[NEON_A] = src.val[NEON_A];
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_R], src.val[NEON_A]);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_G], src.val[NEON_A]);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_B], src.val[NEON_A]);

    return ret;
}
uint8x8x4_t xor_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;
    uint8x8_t isa, ida;
    uint16x8_t tmp_wide, tmp_wide2;

    isa = vsub_u8(vdup_n_u8(255), src.val[NEON_A]);
    ida = vsub_u8(vdup_n_u8(255), dst.val[NEON_A]);

    // First calc alpha: sa + da - 2 * sa*da/255
    tmp_wide = vmovl_u8(src.val[NEON_A]);
    tmp_wide = vaddw_u8(tmp_wide, dst.val[NEON_A]);
    tmp_wide2 = vshll_n_u8(SkAlphaMulAlpha_neon8(src.val[NEON_A], dst.val[NEON_A]), 1);
    tmp_wide = vsubq_u16(tmp_wide, tmp_wide2);
    ret.val[NEON_A] = vmovn_u16(tmp_wide);

    // Then colors
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_R], isa);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_G], isa);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], ida)
                      + SkAlphaMulAlpha_neon8(dst.val[NEON_B], isa);

    return ret;
}
uint8x8x4_t plus_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = vqadd_u8(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = vqadd_u8(src.val[NEON_R], dst.val[NEON_R]);
    ret.val[NEON_G] = vqadd_u8(src.val[NEON_G], dst.val[NEON_G]);
    ret.val[NEON_B] = vqadd_u8(src.val[NEON_B], dst.val[NEON_B]);

    return ret;
}
uint8x8x4_t modulate_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = SkAlphaMulAlpha_neon8(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], dst.val[NEON_R]);
    ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], dst.val[NEON_G]);
    ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], dst.val[NEON_B]);

    return ret;
}
// a + b - a*b/255: srcover for the alpha channel, and also the screen
// blend for color channels.
static inline uint8x8_t srcover_color(uint8x8_t a, uint8x8_t b) {
    uint16x8_t tmp;

    tmp = vaddl_u8(a, b);
    tmp -= SkAlphaMulAlpha_neon8_16(a, b);

    return vmovn_u16(tmp);
}
uint8x8x4_t screen_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = srcover_color(src.val[NEON_R], dst.val[NEON_R]);
    ret.val[NEON_G] = srcover_color(src.val[NEON_G], dst.val[NEON_G]);
    ret.val[NEON_B] = srcover_color(src.val[NEON_B], dst.val[NEON_B]);

    return ret;
}
template <bool overlay>
static inline uint8x8_t overlay_hardlight_color(uint8x8_t sc, uint8x8_t dc,
                                                uint8x8_t sa, uint8x8_t da) {
    /*
     * The final value is (rc + tmp), where rc differs between the two
     * branches. The whole value (rc + tmp) can always be expressed as
     *   VAL = COM - SUB           in the "if" case
     *   VAL = COM + SUB - sa*da   in the "else" case
     *
     * with COM = 255 * (sc + dc)
     * and  SUB = sc*da + dc*sa - 2*dc*sc
     */
    // Prepare common subexpressions
    uint16x8_t const255 = vdupq_n_u16(255);
    uint16x8_t sc_plus_dc = vaddl_u8(sc, dc);
    uint16x8_t scda = vmull_u8(sc, da);
    uint16x8_t dcsa = vmull_u8(dc, sa);
    uint16x8_t sada = vmull_u8(sa, da);

    // Prepare non-common subexpressions
    uint16x8_t dc2, sc2;
    uint32x4_t scdc2_1, scdc2_2;
    if (overlay) {
        dc2 = vshll_n_u8(dc, 1);
        scdc2_1 = vmull_u16(vget_low_u16(dc2), vget_low_u16(vmovl_u8(sc)));
        scdc2_2 = vmull_u16(vget_high_u16(dc2), vget_high_u16(vmovl_u8(sc)));
    } else {
        sc2 = vshll_n_u8(sc, 1);
        scdc2_1 = vmull_u16(vget_low_u16(sc2), vget_low_u16(vmovl_u8(dc)));
        scdc2_2 = vmull_u16(vget_high_u16(sc2), vget_high_u16(vmovl_u8(dc)));
    }

    // Calc COM
    int32x4_t com1, com2;
    com1 = vreinterpretq_s32_u32(
                vmull_u16(vget_low_u16(const255), vget_low_u16(sc_plus_dc)));
    com2 = vreinterpretq_s32_u32(
                vmull_u16(vget_high_u16(const255), vget_high_u16(sc_plus_dc)));

    // Calc SUB
    int32x4_t sub1, sub2;
    sub1 = vreinterpretq_s32_u32(vaddl_u16(vget_low_u16(scda), vget_low_u16(dcsa)));
    sub2 = vreinterpretq_s32_u32(vaddl_u16(vget_high_u16(scda), vget_high_u16(dcsa)));
    sub1 = vsubq_s32(sub1, vreinterpretq_s32_u32(scdc2_1));
    sub2 = vsubq_s32(sub2, vreinterpretq_s32_u32(scdc2_2));

    // Compare 2*dc <= da (overlay) or 2*sc <= sa (hardlight)
    uint16x8_t cmp;

    if (overlay) {
        cmp = vcleq_u16(dc2, vmovl_u8(da));
    } else {
        cmp = vcleq_u16(sc2, vmovl_u8(sa));
    }

    // Widen the 16-bit comparison masks to 32 bits
    int32x4_t val1_1, val1_2;
    int32x4_t val2_1, val2_2;
    uint32x4_t cmp1, cmp2;

    cmp1 = vmovl_u16(vget_low_u16(cmp));
    cmp1 |= vshlq_n_u32(cmp1, 16);
    cmp2 = vmovl_u16(vget_high_u16(cmp));
    cmp2 |= vshlq_n_u32(cmp2, 16);

    // Calc COM - SUB
    val1_1 = com1 - sub1;
    val1_2 = com2 - sub2;

    // Calc COM + SUB - sa*da
    val2_1 = com1 + sub1;
    val2_2 = com2 + sub2;

    val2_1 = vsubq_s32(val2_1, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(sada))));
    val2_2 = vsubq_s32(val2_2, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(sada))));

    // Insert where needed
    val1_1 = vbslq_s32(cmp1, val1_1, val2_1);
    val1_2 = vbslq_s32(cmp2, val1_2, val2_2);

    // Call the clamp_div255round function
    return clamp_div255round_simd8_32(val1_1, val1_2);
}
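// For reference, the scalar per-channel form being vectorized above
// (a sketch of the overlay/hardlight byte math):
//
//     int tmp = sc*(255 - da) + dc*(255 - sa);
//     int rc  = (2*dc <= da) ? 2*sc*dc                         // "if" case
//                            : sa*da - 2*(da - dc)*(sa - sc);  // "else" case
//     result  = clamp_div255round(rc + tmp);
//
// Expanding both cases shows rc + tmp == COM - SUB when 2*dc <= da, and
// COM + SUB - sa*da otherwise, exactly as the comment block at the top of
// the template states. For hardlight, the roles of (sc, sa) and (dc, da)
// swap in the test and in rc.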
static inline uint8x8_t overlay_color(uint8x8_t sc, uint8x8_t dc,
                                      uint8x8_t sa, uint8x8_t da) {
    return overlay_hardlight_color<true>(sc, dc, sa, da);
}
uint8x8x4_t overlay_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = overlay_color(src.val[NEON_R], dst.val[NEON_R],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = overlay_color(src.val[NEON_G], dst.val[NEON_G],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = overlay_color(src.val[NEON_B], dst.val[NEON_B],
                                    src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
template <bool lighten>
static inline uint8x8_t lighten_darken_color(uint8x8_t sc, uint8x8_t dc,
                                             uint8x8_t sa, uint8x8_t da) {
    uint16x8_t sd, ds, cmp, tmp, tmp2;

    // Prepare
    sd = vmull_u8(sc, da);
    ds = vmull_u8(dc, sa);

    // Do test
    if (lighten) {
        cmp = vcgtq_u16(sd, ds);
    } else {
        cmp = vcltq_u16(sd, ds);
    }

    // Result when the test passes: sc + dc - SkDiv255Round(ds)
    tmp = vaddl_u8(sc, dc);
    tmp2 = tmp;
    tmp -= SkDiv255Round_neon8_16_16(ds);

    // Result otherwise: sc + dc - SkDiv255Round(sd)
    tmp2 -= SkDiv255Round_neon8_16_16(sd);

    // Insert where needed
    tmp = vbslq_u16(cmp, tmp, tmp2);

    return vmovn_u16(tmp);
}
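// Scalar shape of the channel blend above, with sd = sc*da and ds = dc*sa:
//
//     if (sd (>|<) ds)  result = sc + dc - SkDiv255Round(ds);
//     else              result = sc + dc - SkDiv255Round(sd);
//
// i.e. keep the side that wins the comparison (> for lighten, < for darken)
// and subtract the losing side's rounded product.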
static inline uint8x8_t darken_color(uint8x8_t sc, uint8x8_t dc,
                                     uint8x8_t sa, uint8x8_t da) {
    return lighten_darken_color<false>(sc, dc, sa, da);
}
uint8x8x4_t darken_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = darken_color(src.val[NEON_R], dst.val[NEON_R],
                                   src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = darken_color(src.val[NEON_G], dst.val[NEON_G],
                                   src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = darken_color(src.val[NEON_B], dst.val[NEON_B],
                                   src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
static inline uint8x8_t lighten_color(uint8x8_t sc, uint8x8_t dc,
                                      uint8x8_t sa, uint8x8_t da) {
    return lighten_darken_color<true>(sc, dc, sa, da);
}
uint8x8x4_t lighten_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = lighten_color(src.val[NEON_R], dst.val[NEON_R],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = lighten_color(src.val[NEON_G], dst.val[NEON_G],
                                    src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = lighten_color(src.val[NEON_B], dst.val[NEON_B],
                                    src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
static inline uint8x8_t hardlight_color(uint8x8_t sc, uint8x8_t dc,
                                        uint8x8_t sa, uint8x8_t da) {
    return overlay_hardlight_color<false>(sc, dc, sa, da);
}
uint8x8x4_t hardlight_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = hardlight_color(src.val[NEON_R], dst.val[NEON_R],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = hardlight_color(src.val[NEON_G], dst.val[NEON_G],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = hardlight_color(src.val[NEON_B], dst.val[NEON_B],
                                      src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
static inline uint8x8_t difference_color(uint8x8_t sc, uint8x8_t dc,
                                         uint8x8_t sa, uint8x8_t da) {
    uint16x8_t sd, ds, tmp;
    int16x8_t val;

    sd = vmull_u8(sc, da);
    ds = vmull_u8(dc, sa);

    // 2 * SkDiv255Round(min(sc*da, dc*sa))
    tmp = vminq_u16(sd, ds);
    tmp = SkDiv255Round_neon8_16_16(tmp);
    tmp = vshlq_n_u16(tmp, 1);

    val = vreinterpretq_s16_u16(vaddl_u8(sc, dc));

    val -= vreinterpretq_s16_u16(tmp);

    // Clamp to [0, 255]
    val = vmaxq_s16(val, vdupq_n_s16(0));
    val = vminq_s16(val, vdupq_n_s16(255));

    return vmovn_u16(vreinterpretq_u16_s16(val));
}
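// Scalar shape of the difference blend above (hedged sketch):
//
//     result = clamp_to_0_255(sc + dc - 2 * SkDiv255Round(min(sc*da, dc*sa)));
//
// The vector code computes both products, takes the per-lane minimum,
// divides by 255 with rounding, doubles it, and clamps with vmaxq/vminq.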
uint8x8x4_t difference_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = difference_color(src.val[NEON_R], dst.val[NEON_R],
                                       src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = difference_color(src.val[NEON_G], dst.val[NEON_G],
                                       src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = difference_color(src.val[NEON_B], dst.val[NEON_B],
                                       src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
static inline uint8x8_t exclusion_color(uint8x8_t sc, uint8x8_t dc,
                                        uint8x8_t sa, uint8x8_t da) {
    /* The exclusion numerator simplifies to 255 * (sc + dc) - 2 * sc * dc */

    uint16x8_t sc_plus_dc, scdc, const255;
    int32x4_t term1_1, term1_2, term2_1, term2_2;

    /* Calc (sc + dc) and (sc * dc) */
    sc_plus_dc = vaddl_u8(sc, dc);
    scdc = vmull_u8(sc, dc);

    /* Prepare constants */
    const255 = vdupq_n_u16(255);

    /* Calc the first term */
    term1_1 = vreinterpretq_s32_u32(
                vmull_u16(vget_low_u16(const255), vget_low_u16(sc_plus_dc)));
    term1_2 = vreinterpretq_s32_u32(
                vmull_u16(vget_high_u16(const255), vget_high_u16(sc_plus_dc)));

    /* Calc the second term */
    term2_1 = vreinterpretq_s32_u32(vshll_n_u16(vget_low_u16(scdc), 1));
    term2_2 = vreinterpretq_s32_u32(vshll_n_u16(vget_high_u16(scdc), 1));

    return clamp_div255round_simd8_32(term1_1 - term2_1, term1_2 - term2_2);
}
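// Why the simplification holds: the full exclusion numerator is
//     sc*da + dc*sa - 2*sc*dc + sc*(255 - da) + dc*(255 - sa)
// and the sc*da / dc*sa terms cancel, leaving 255*(sc + dc) - 2*sc*dc,
// which is what the code above feeds to clamp_div255round_simd8_32.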
uint8x8x4_t exclusion_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = exclusion_color(src.val[NEON_R], dst.val[NEON_R],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = exclusion_color(src.val[NEON_G], dst.val[NEON_G],
                                      src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = exclusion_color(src.val[NEON_B], dst.val[NEON_B],
                                      src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
static inline uint8x8_t blendfunc_multiply_color(uint8x8_t sc, uint8x8_t dc,
                                                 uint8x8_t sa, uint8x8_t da) {
    uint32x4_t val1, val2;
    uint16x8_t scdc, t1, t2;

    t1 = vmull_u8(sc, vdup_n_u8(255) - da);     // sc * (255 - da)
    t2 = vmull_u8(dc, vdup_n_u8(255) - sa);     // dc * (255 - sa)
    scdc = vmull_u8(sc, dc);

    // Accumulate in 32 bits: the sum of three 16-bit products can overflow.
    val1 = vaddl_u16(vget_low_u16(t1), vget_low_u16(t2));
    val2 = vaddl_u16(vget_high_u16(t1), vget_high_u16(t2));

    val1 = vaddw_u16(val1, vget_low_u16(scdc));
    val2 = vaddw_u16(val2, vget_high_u16(scdc));

    return clamp_div255round_simd8_32(
                vreinterpretq_s32_u32(val1), vreinterpretq_s32_u32(val2));
}
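// Scalar shape of the multiply blend above:
//     result = clamp_div255round(sc*dc + sc*(255 - da) + dc*(255 - sa))
// matching the compositing definition Sca*Dca + Sca*(1 - Da) + Dca*(1 - Sa).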
uint8x8x4_t multiply_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
    uint8x8x4_t ret;

    ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_R] = blendfunc_multiply_color(src.val[NEON_R], dst.val[NEON_R],
                                               src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_G] = blendfunc_multiply_color(src.val[NEON_G], dst.val[NEON_G],
                                               src.val[NEON_A], dst.val[NEON_A]);
    ret.val[NEON_B] = blendfunc_multiply_color(src.val[NEON_B], dst.val[NEON_B],
                                               src.val[NEON_A], dst.val[NEON_A]);

    return ret;
}
////////////////////////////////////////////////////////////////////////////////

typedef uint8x8x4_t (*SkXfermodeProcSIMD)(uint8x8x4_t src, uint8x8x4_t dst);

extern SkXfermodeProcSIMD gNEONXfermodeProcs[];
SkNEONProcCoeffXfermode::SkNEONProcCoeffXfermode(SkReadBuffer& buffer)
        : INHERITED(buffer) {
    fProcSIMD = reinterpret_cast<void*>(gNEONXfermodeProcs[this->getMode()]);
}
void SkNEONProcCoeffXfermode::xfer32(SkPMColor dst[], const SkPMColor src[],
                                     int count, const SkAlpha aa[]) const {
    SkASSERT(dst && src && count >= 0);

    SkXfermodeProc proc = this->getProc();
    SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
    SkASSERT(procSIMD != NULL);

    if (NULL == aa) {
        // Unrolled NEON code: process 8 pixels per iteration
        while (count >= 8) {
            uint8x8x4_t vsrc, vdst, vres;

            // GCC 4.7+ can name a uint8x8x4_t asm operand with the %h
            // modifier; older compilers take the fallback, which pins the
            // eight d-registers by hand.
#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
            asm volatile (
                "vld4.u8    %h[vsrc], [%[src]]!  \t\n"
                "vld4.u8    %h[vdst], [%[dst]]   \t\n"
                : [vsrc] "=w" (vsrc), [vdst] "=w" (vdst), [src] "+&r" (src)
                : [dst] "r" (dst)
                :
            );
#else
            register uint8x8_t d0 asm("d0");
            register uint8x8_t d1 asm("d1");
            register uint8x8_t d2 asm("d2");
            register uint8x8_t d3 asm("d3");
            register uint8x8_t d4 asm("d4");
            register uint8x8_t d5 asm("d5");
            register uint8x8_t d6 asm("d6");
            register uint8x8_t d7 asm("d7");

            asm volatile (
                "vld4.u8    {d0-d3},[%[src]]!;"
                "vld4.u8    {d4-d7},[%[dst]];"
                : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
                  "=w" (d4), "=w" (d5), "=w" (d6), "=w" (d7),
                  [src] "+&r" (src)
                : [dst] "r" (dst)
                :
            );
            vsrc.val[0] = d0; vdst.val[0] = d4;
            vsrc.val[1] = d1; vdst.val[1] = d5;
            vsrc.val[2] = d2; vdst.val[2] = d6;
            vsrc.val[3] = d3; vdst.val[3] = d7;
#endif

            vres = procSIMD(vsrc, vdst);

            vst4_u8((uint8_t*)dst, vres);

            count -= 8;
            dst += 8;
        }
        // Leftovers
        for (int i = 0; i < count; i++) {
            dst[i] = proc(src[i], dst[i]);
        }
    } else {
        // With coverage: blend at full strength, then interpolate toward the
        // original dst by the coverage value.
        for (int i = count - 1; i >= 0; --i) {
            unsigned a = aa[i];
            if (0 != a) {
                SkPMColor dstC = dst[i];
                SkPMColor C = proc(src[i], dstC);
                if (a != 0xFF) {
                    C = SkFourByteInterp_neon(C, dstC, a);
                }
                dst[i] = C;
            }
        }
    }
}
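// The coverage path computes, per channel and up to the usual 255-to-256
// scale conversion,
//     dst[i] = dstC + (aa[i] / 255) * (proc(src[i], dstC) - dstC)
// which is what SkFourByteInterp_neon evaluates.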
void SkNEONProcCoeffXfermode::xfer16(uint16_t* SK_RESTRICT dst,
                                     const SkPMColor* SK_RESTRICT src, int count,
                                     const SkAlpha* SK_RESTRICT aa) const {
    SkASSERT(dst && src && count >= 0);

    SkXfermodeProc proc = this->getProc();
    SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
    SkASSERT(procSIMD != NULL);

    if (NULL == aa) {
        while (count >= 8) {
            uint16x8_t vdst, vres16;
            uint8x8x4_t vdst32, vsrc, vres;

            // Load eight 565 dst pixels; they are widened to 8888 below,
            // blended, and repacked before the store.
            vdst = vld1q_u16(dst);

#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
            asm volatile (
                "vld4.u8    %h[vsrc], [%[src]]!  \t\n"
                : [vsrc] "=w" (vsrc), [src] "+&r" (src)
                : :
            );
#else
            register uint8x8_t d0 asm("d0");
            register uint8x8_t d1 asm("d1");
            register uint8x8_t d2 asm("d2");
            register uint8x8_t d3 asm("d3");

            asm volatile (
                "vld4.u8    {d0-d3},[%[src]]!;"
                : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
                  [src] "+&r" (src)
                : :
            );
            vsrc.val[0] = d0;
            vsrc.val[1] = d1;
            vsrc.val[2] = d2;
            vsrc.val[3] = d3;
#endif

            vdst32 = SkPixel16ToPixel32_neon8(vdst);
            vres = procSIMD(vsrc, vdst32);
            vres16 = SkPixel32ToPixel16_neon8(vres);

            vst1q_u16(dst, vres16);

            count -= 8;
            dst += 8;
        }
        for (int i = 0; i < count; i++) {
            SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
            dst[i] = SkPixel32ToPixel16_ToU16(proc(src[i], dstC));
        }
    } else {
        for (int i = count - 1; i >= 0; --i) {
            unsigned a = aa[i];
            if (0 != a) {
                SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
                SkPMColor C = proc(src[i], dstC);
                if (0xFF != a) {
                    C = SkFourByteInterp_neon(C, dstC, a);
                }
                dst[i] = SkPixel32ToPixel16_ToU16(C);
            }
        }
    }
}

#ifndef SK_IGNORE_TO_STRING
void SkNEONProcCoeffXfermode::toString(SkString* str) const {
    this->INHERITED::toString(str);
}
#endif

////////////////////////////////////////////////////////////////////////////////
SkXfermodeProcSIMD gNEONXfermodeProcs[] = {
    NULL,                       // kClear_Mode
    NULL,                       // kSrc_Mode
    NULL,                       // kDst_Mode
    NULL,                       // kSrcOver_Mode
    dstover_modeproc_neon8,     // kDstOver_Mode
    srcin_modeproc_neon8,       // kSrcIn_Mode
    dstin_modeproc_neon8,       // kDstIn_Mode
    srcout_modeproc_neon8,      // kSrcOut_Mode
    dstout_modeproc_neon8,      // kDstOut_Mode
    srcatop_modeproc_neon8,     // kSrcATop_Mode
    dstatop_modeproc_neon8,     // kDstATop_Mode
    xor_modeproc_neon8,         // kXor_Mode
    plus_modeproc_neon8,        // kPlus_Mode
    modulate_modeproc_neon8,    // kModulate_Mode
    screen_modeproc_neon8,      // kScreen_Mode

    overlay_modeproc_neon8,     // kOverlay_Mode
    darken_modeproc_neon8,      // kDarken_Mode
    lighten_modeproc_neon8,     // kLighten_Mode
    NULL,                       // kColorDodge_Mode
    NULL,                       // kColorBurn_Mode
    hardlight_modeproc_neon8,   // kHardLight_Mode
    NULL,                       // kSoftLight_Mode
    difference_modeproc_neon8,  // kDifference_Mode
    exclusion_modeproc_neon8,   // kExclusion_Mode
    multiply_modeproc_neon8,    // kMultiply_Mode

    NULL,                       // kHue_Mode
    NULL,                       // kSaturation_Mode
    NULL,                       // kColor_Mode
    NULL,                       // kLuminosity_Mode
};

SK_COMPILE_ASSERT(
    SK_ARRAY_COUNT(gNEONXfermodeProcs) == SkXfermode::kLastMode + 1,
    mode_count_arm
);
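// A NULL entry means there is no NEON routine for that mode;
// SkPlatformXfermodeFactory_impl_neon below returns NULL in that case and
// the caller keeps the portable implementation.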
SkXfermodeProc gNEONXfermodeProcs1[] = {
    NULL,                       // kClear_Mode
    NULL,                       // kSrc_Mode
    NULL,                       // kDst_Mode
    NULL,                       // kSrcOver_Mode
    NULL,                       // kDstOver_Mode
    NULL,                       // kSrcIn_Mode
    NULL,                       // kDstIn_Mode
    NULL,                       // kSrcOut_Mode
    NULL,                       // kDstOut_Mode
    srcatop_modeproc_neon,      // kSrcATop_Mode
    dstatop_modeproc_neon,      // kDstATop_Mode
    xor_modeproc_neon,          // kXor_Mode
    plus_modeproc_neon,         // kPlus_Mode
    modulate_modeproc_neon,     // kModulate_Mode
    NULL,                       // kScreen_Mode

    NULL,                       // kOverlay_Mode
    NULL,                       // kDarken_Mode
    NULL,                       // kLighten_Mode
    NULL,                       // kColorDodge_Mode
    NULL,                       // kColorBurn_Mode
    NULL,                       // kHardLight_Mode
    NULL,                       // kSoftLight_Mode
    NULL,                       // kDifference_Mode
    NULL,                       // kExclusion_Mode
    NULL,                       // kMultiply_Mode

    NULL,                       // kHue_Mode
    NULL,                       // kSaturation_Mode
    NULL,                       // kColor_Mode
    NULL,                       // kLuminosity_Mode
};

SK_COMPILE_ASSERT(
    SK_ARRAY_COUNT(gNEONXfermodeProcs1) == SkXfermode::kLastMode + 1,
    mode1_count_arm
);

SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& rec,
                                                         SkXfermode::Mode mode) {

    void* procSIMD = reinterpret_cast<void*>(gNEONXfermodeProcs[mode]);

    if (procSIMD != NULL) {
        return SkNEW_ARGS(SkNEONProcCoeffXfermode, (rec, mode, procSIMD));
    }
    return NULL;
}

SkXfermodeProc SkPlatformXfermodeProcFactory_impl_neon(SkXfermode::Mode mode) {
    return gNEONXfermodeProcs1[mode];
}