gfx/skia/trunk/src/opts/SkBlitRow_opts_arm_neon.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts, with some
reservations about how to convince FindCookie users to test the
condition and pass a nullptr when disabling double key logic.

/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlitRow_opts_arm_neon.h"

#include "SkBlitMask.h"
#include "SkBlitRow.h"
#include "SkColorPriv.h"
#include "SkDither.h"
#include "SkMathPriv.h"
#include "SkUtils.h"

#include "SkCachePreload_arm.h"
#include "SkColor_opts_neon.h"
#include <arm_neon.h>

void S32_D565_Opaque_neon(uint16_t* SK_RESTRICT dst,
                          const SkPMColor* SK_RESTRICT src, int count,
                          U8CPU alpha, int /*x*/, int /*y*/) {
    SkASSERT(255 == alpha);

    while (count >= 8) {
        uint8x8x4_t vsrc;
        uint16x8_t vdst;

        // Load
        vsrc = vld4_u8((uint8_t*)src);

        // Convert src to 565
        vdst = SkPixel32ToPixel16_neon8(vsrc);

        // Store
        vst1q_u16(dst, vdst);

        // Prepare next iteration
        dst += 8;
        src += 8;
        count -= 8;
    }

    // Leftovers
    while (count > 0) {
        SkPMColor c = *src++;
        SkPMColorAssert(c);
        *dst = SkPixel32ToPixel16_ToU16(c);
        dst++;
        count--;
    }
}

void S32A_D565_Opaque_neon(uint16_t* SK_RESTRICT dst,
                           const SkPMColor* SK_RESTRICT src, int count,
                           U8CPU alpha, int /*x*/, int /*y*/) {
    SkASSERT(255 == alpha);

    if (count >= 8) {
        uint16_t* SK_RESTRICT keep_dst = 0;

        asm volatile (
            "ands       ip, %[count], #7            \n\t"
            "vmov.u8    d31, #1<<7                  \n\t"
            "vld1.16    {q12}, [%[dst]]             \n\t"
            "vld4.8     {d0-d3}, [%[src]]           \n\t"
            // Thumb does not support the standard ARM conditional
            // instructions but instead requires the 'it' instruction
            // to signal conditional execution
            "it eq                                  \n\t"
            "moveq      ip, #8                      \n\t"
            "mov        %[keep_dst], %[dst]         \n\t"

            "add        %[src], %[src], ip, LSL#2   \n\t"
            "add        %[dst], %[dst], ip, LSL#1   \n\t"
            "subs       %[count], %[count], ip      \n\t"
            "b          9f                          \n\t"
            // LOOP
            "2:                                     \n\t"

            "vld1.16    {q12}, [%[dst]]!            \n\t"
            "vld4.8     {d0-d3}, [%[src]]!          \n\t"
            "vst1.16    {q10}, [%[keep_dst]]        \n\t"
            "sub        %[keep_dst], %[dst], #8*2   \n\t"
            "subs       %[count], %[count], #8      \n\t"
            "9:                                     \n\t"
            "pld        [%[dst],#32]                \n\t"
            // expand 0565 q12 to 8888 {d4-d7}
            "vmovn.u16  d4, q12                     \n\t"
            "vshr.u16   q11, q12, #5                \n\t"
            "vshr.u16   q10, q12, #6+5              \n\t"
            "vmovn.u16  d5, q11                     \n\t"
            "vmovn.u16  d6, q10                     \n\t"
            "vshl.u8    d4, d4, #3                  \n\t"
            "vshl.u8    d5, d5, #2                  \n\t"
            "vshl.u8    d6, d6, #3                  \n\t"

            "vmovl.u8   q14, d31                    \n\t"
            "vmovl.u8   q13, d31                    \n\t"
            "vmovl.u8   q12, d31                    \n\t"

            // duplicate in 4/2/1 & 8pix versions
            "vmvn.8     d30, d3                     \n\t"
            "vmlal.u8   q14, d30, d6                \n\t"
            "vmlal.u8   q13, d30, d5                \n\t"
            "vmlal.u8   q12, d30, d4                \n\t"
            "vshr.u16   q8, q14, #5                 \n\t"
            "vshr.u16   q9, q13, #6                 \n\t"
            "vaddhn.u16 d6, q14, q8                 \n\t"
            "vshr.u16   q8, q12, #5                 \n\t"
            "vaddhn.u16 d5, q13, q9                 \n\t"
            "vqadd.u8   d6, d6, d0                  \n\t"  // moved up
            "vaddhn.u16 d4, q12, q8                 \n\t"
            // intentionally don't calculate alpha
            // result in d4-d6

            "vqadd.u8   d5, d5, d1                  \n\t"
            "vqadd.u8   d4, d4, d2                  \n\t"

            // pack 8888 {d4-d6} to 0565 q10
            "vshll.u8   q10, d6, #8                 \n\t"
            "vshll.u8   q3, d5, #8                  \n\t"
            "vshll.u8   q2, d4, #8                  \n\t"
            "vsri.u16   q10, q3, #5                 \n\t"
            "vsri.u16   q10, q2, #11                \n\t"

            "bne        2b                          \n\t"

            "1:                                     \n\t"
            "vst1.16    {q10}, [%[keep_dst]]        \n\t"
            : [count] "+r" (count)
            : [dst] "r" (dst), [keep_dst] "r" (keep_dst), [src] "r" (src)
            : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
              "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
              "d30","d31"
        );
    } else {
        // handle count < 8
        uint16_t* SK_RESTRICT keep_dst = 0;

        asm volatile (
            "vmov.u8    d31, #1<<7                  \n\t"
            "mov        %[keep_dst], %[dst]         \n\t"

            "tst        %[count], #4                \n\t"
            "beq        14f                         \n\t"
            "vld1.16    {d25}, [%[dst]]!            \n\t"
            "vld1.32    {q1}, [%[src]]!             \n\t"

            "14:                                    \n\t"
            "tst        %[count], #2                \n\t"
            "beq        12f                         \n\t"
            "vld1.32    {d24[1]}, [%[dst]]!         \n\t"
            "vld1.32    {d1}, [%[src]]!             \n\t"

            "12:                                    \n\t"
            "tst        %[count], #1                \n\t"
            "beq        11f                         \n\t"
            "vld1.16    {d24[1]}, [%[dst]]!         \n\t"
            "vld1.32    {d0[1]}, [%[src]]!          \n\t"

            "11:                                    \n\t"
            // unzips achieve the same as a vld4 operation
            "vuzpq.u16  q0, q1                      \n\t"
            "vuzp.u8    d0, d1                      \n\t"
            "vuzp.u8    d2, d3                      \n\t"
            // expand 0565 q12 to 8888 {d4-d7}
            "vmovn.u16  d4, q12                     \n\t"
            "vshr.u16   q11, q12, #5                \n\t"
            "vshr.u16   q10, q12, #6+5              \n\t"
            "vmovn.u16  d5, q11                     \n\t"
            "vmovn.u16  d6, q10                     \n\t"
            "vshl.u8    d4, d4, #3                  \n\t"
            "vshl.u8    d5, d5, #2                  \n\t"
            "vshl.u8    d6, d6, #3                  \n\t"

            "vmovl.u8   q14, d31                    \n\t"
            "vmovl.u8   q13, d31                    \n\t"
            "vmovl.u8   q12, d31                    \n\t"

            // duplicate in 4/2/1 & 8pix versions
            "vmvn.8     d30, d3                     \n\t"
            "vmlal.u8   q14, d30, d6                \n\t"
            "vmlal.u8   q13, d30, d5                \n\t"
            "vmlal.u8   q12, d30, d4                \n\t"
            "vshr.u16   q8, q14, #5                 \n\t"
            "vshr.u16   q9, q13, #6                 \n\t"
            "vaddhn.u16 d6, q14, q8                 \n\t"
            "vshr.u16   q8, q12, #5                 \n\t"
            "vaddhn.u16 d5, q13, q9                 \n\t"
            "vqadd.u8   d6, d6, d0                  \n\t"  // moved up
            "vaddhn.u16 d4, q12, q8                 \n\t"
            // intentionally don't calculate alpha
            // result in d4-d6

            "vqadd.u8   d5, d5, d1                  \n\t"
            "vqadd.u8   d4, d4, d2                  \n\t"

            // pack 8888 {d4-d6} to 0565 q10
            "vshll.u8   q10, d6, #8                 \n\t"
            "vshll.u8   q3, d5, #8                  \n\t"
            "vshll.u8   q2, d4, #8                  \n\t"
            "vsri.u16   q10, q3, #5                 \n\t"
            "vsri.u16   q10, q2, #11                \n\t"

            // store
            "tst        %[count], #4                \n\t"
            "beq        24f                         \n\t"
            "vst1.16    {d21}, [%[keep_dst]]!       \n\t"

            "24:                                    \n\t"
            "tst        %[count], #2                \n\t"
            "beq        22f                         \n\t"
            "vst1.32    {d20[1]}, [%[keep_dst]]!    \n\t"

            "22:                                    \n\t"
            "tst        %[count], #1                \n\t"
            "beq        21f                         \n\t"
            "vst1.16    {d20[1]}, [%[keep_dst]]!    \n\t"

            "21:                                    \n\t"
            : [count] "+r" (count)
            : [dst] "r" (dst), [keep_dst] "r" (keep_dst), [src] "r" (src)
            : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
              "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
              "d30","d31"
        );
    }
}

static inline uint16x8_t SkDiv255Round_neon8(uint16x8_t prod) {
    prod += vdupq_n_u16(128);
    prod += vshrq_n_u16(prod, 8);
    return vshrq_n_u16(prod, 8);
}
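
// A scalar sketch of the same rounding step, for reference only (the helper
// name below is ours; it is not used by the blitters): for the 16-bit
// products that vmull.u8 produces (0 <= x <= 255*255), adding 128 and folding
// the high byte back in computes round(x / 255) exactly.
static inline uint16_t SkDiv255Round_scalar_sketch(uint16_t prod) {
    unsigned x = prod + 128u;               // bias for round-to-nearest
    return (uint16_t)((x + (x >> 8)) >> 8); // fold high byte, then divide by 256
}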

void S32A_D565_Blend_neon(uint16_t* SK_RESTRICT dst,
                          const SkPMColor* SK_RESTRICT src, int count,
                          U8CPU alpha, int /*x*/, int /*y*/) {
    SkASSERT(255 > alpha);

    /* This code implements a Neon version of S32A_D565_Blend. The results have
     * a few mismatches compared to the original code. These mismatches never
     * exceed 1.
     */

    if (count >= 8) {
        uint16x8_t valpha_max, vmask_blue;
        uint8x8_t valpha;

        // prepare constants
        valpha_max = vmovq_n_u16(255);
        valpha = vdup_n_u8(alpha);
        vmask_blue = vmovq_n_u16(SK_B16_MASK);

        do {
            uint16x8_t vdst, vdst_r, vdst_g, vdst_b;
            uint16x8_t vres_a, vres_r, vres_g, vres_b;
            uint8x8x4_t vsrc;

            // load pixels
            vdst = vld1q_u16(dst);
#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
            asm (
                "vld4.u8 %h[vsrc], [%[src]]!"
                : [vsrc] "=w" (vsrc), [src] "+&r" (src)
                : :
            );
#else
            register uint8x8_t d0 asm("d0");
            register uint8x8_t d1 asm("d1");
            register uint8x8_t d2 asm("d2");
            register uint8x8_t d3 asm("d3");

            asm volatile (
                "vld4.u8 {d0-d3},[%[src]]!;"
                : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
                  [src] "+&r" (src)
                : :
            );
            vsrc.val[0] = d0;
            vsrc.val[1] = d1;
            vsrc.val[2] = d2;
            vsrc.val[3] = d3;
#endif

            // deinterleave dst
            vdst_g = vshlq_n_u16(vdst, SK_R16_BITS);                 // shift green to top of lanes
            vdst_b = vdst & vmask_blue;                              // extract blue
            vdst_r = vshrq_n_u16(vdst, SK_R16_SHIFT);                // extract red
            vdst_g = vshrq_n_u16(vdst_g, SK_R16_BITS + SK_B16_BITS); // extract green

            // shift src to 565
            vsrc.val[NEON_R] = vshr_n_u8(vsrc.val[NEON_R], 8 - SK_R16_BITS);
            vsrc.val[NEON_G] = vshr_n_u8(vsrc.val[NEON_G], 8 - SK_G16_BITS);
            vsrc.val[NEON_B] = vshr_n_u8(vsrc.val[NEON_B], 8 - SK_B16_BITS);

            // calc src * src_scale
            vres_a = vmull_u8(vsrc.val[NEON_A], valpha);
            vres_r = vmull_u8(vsrc.val[NEON_R], valpha);
            vres_g = vmull_u8(vsrc.val[NEON_G], valpha);
            vres_b = vmull_u8(vsrc.val[NEON_B], valpha);

            // prepare dst_scale
            vres_a = SkDiv255Round_neon8(vres_a);
            vres_a = valpha_max - vres_a; // 255 - (sa * src_scale) / 255

            // add dst * dst_scale to previous result
            vres_r = vmlaq_u16(vres_r, vdst_r, vres_a);
            vres_g = vmlaq_u16(vres_g, vdst_g, vres_a);
            vres_b = vmlaq_u16(vres_b, vdst_b, vres_a);

#ifdef S32A_D565_BLEND_EXACT
            // It is possible to get exact results with this but it is slow,
            // even slower than C code in some cases
            vres_r = SkDiv255Round_neon8(vres_r);
            vres_g = SkDiv255Round_neon8(vres_g);
            vres_b = SkDiv255Round_neon8(vres_b);
#else
            vres_r = vrshrq_n_u16(vres_r, 8);
            vres_g = vrshrq_n_u16(vres_g, 8);
            vres_b = vrshrq_n_u16(vres_b, 8);
#endif
            // pack result
            vres_b = vsliq_n_u16(vres_b, vres_g, SK_G16_SHIFT); // insert green into blue
            vres_b = vsliq_n_u16(vres_b, vres_r, SK_R16_SHIFT); // insert red into green/blue

            // store
            vst1q_u16(dst, vres_b);
            dst += 8;
            count -= 8;
        } while (count >= 8);
    }

    // leftovers
    while (count-- > 0) {
        SkPMColor sc = *src++;
        if (sc) {
            uint16_t dc = *dst;
            unsigned dst_scale = 255 - SkMulDiv255Round(SkGetPackedA32(sc), alpha);
            unsigned dr = SkMulS16(SkPacked32ToR16(sc), alpha) + SkMulS16(SkGetPackedR16(dc), dst_scale);
            unsigned dg = SkMulS16(SkPacked32ToG16(sc), alpha) + SkMulS16(SkGetPackedG16(dc), dst_scale);
            unsigned db = SkMulS16(SkPacked32ToB16(sc), alpha) + SkMulS16(SkGetPackedB16(dc), dst_scale);
            *dst = SkPackRGB16(SkDiv255Round(dr), SkDiv255Round(dg), SkDiv255Round(db));
        }
        dst += 1;
    }
}

/* dither matrix for Neon, derived from gDitherMatrix_3Bit_16.
 * each dither value is spaced out into byte lanes, and repeated
 * to allow an 8-byte load from offsets 0, 1, 2 or 3 from the
 * start of each row.
 */
static const uint8_t gDitherMatrix_Neon[48] = {
    0, 4, 1, 5, 0, 4, 1, 5, 0, 4, 1, 5,
    6, 2, 7, 3, 6, 2, 7, 3, 6, 2, 7, 3,
    1, 5, 0, 4, 1, 5, 0, 4, 1, 5, 0, 4,
    7, 3, 6, 2, 7, 3, 6, 2, 7, 3, 6, 2,
};
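
// Worked example of the indexing (illustration only): a span starting at
// (x, y) == (2, 1) selects &gDitherMatrix_Neon[1*12 + 2], so the eight bytes
// loaded by vld1_u8 are {7, 3, 6, 2, 7, 3, 6, 2} -- row 1 of the 4x4 matrix,
// rotated so that lane 0 holds the dither value for x == 2.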

void S32_D565_Blend_Dither_neon(uint16_t *dst, const SkPMColor *src,
                                int count, U8CPU alpha, int x, int y)
{
    SkASSERT(255 > alpha);

    // rescale alpha to range 1 - 256
    int scale = SkAlpha255To256(alpha);

    if (count >= 8) {
        /* select row and offset for dither array */
        const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)];

        uint8x8_t vdither = vld1_u8(dstart);         // load dither values
        uint8x8_t vdither_g = vshr_n_u8(vdither, 1); // calc. green dither values

        int16x8_t vscale = vdupq_n_s16(scale);       // duplicate scale into neon reg
        uint16x8_t vmask_b = vdupq_n_u16(0x1F);      // set up blue mask

        do {
            uint8x8_t vsrc_r, vsrc_g, vsrc_b;
            uint8x8_t vsrc565_r, vsrc565_g, vsrc565_b;
            uint16x8_t vsrc_dit_r, vsrc_dit_g, vsrc_dit_b;
            uint16x8_t vsrc_res_r, vsrc_res_g, vsrc_res_b;
            uint16x8_t vdst;
            uint16x8_t vdst_r, vdst_g, vdst_b;
            int16x8_t vres_r, vres_g, vres_b;
            int8x8_t vres8_r, vres8_g, vres8_b;

            // Load source and add dither
            {
                register uint8x8_t d0 asm("d0");
                register uint8x8_t d1 asm("d1");
                register uint8x8_t d2 asm("d2");
                register uint8x8_t d3 asm("d3");

                asm (
                    "vld4.8 {d0-d3},[%[src]]! /* r=%P0 g=%P1 b=%P2 a=%P3 */"
                    : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src)
                    :
                );
                vsrc_g = d1;
#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
                vsrc_r = d2; vsrc_b = d0;
#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
                vsrc_r = d0; vsrc_b = d2;
#endif
            }

            vsrc565_g = vshr_n_u8(vsrc_g, 6); // calc. green >> 6
            vsrc565_r = vshr_n_u8(vsrc_r, 5); // calc. red >> 5
            vsrc565_b = vshr_n_u8(vsrc_b, 5); // calc. blue >> 5

            vsrc_dit_g = vaddl_u8(vsrc_g, vdither_g); // add in dither to green and widen
            vsrc_dit_r = vaddl_u8(vsrc_r, vdither);   // add in dither to red and widen
            vsrc_dit_b = vaddl_u8(vsrc_b, vdither);   // add in dither to blue and widen

            vsrc_dit_r = vsubw_u8(vsrc_dit_r, vsrc565_r); // sub shifted red from result
            vsrc_dit_g = vsubw_u8(vsrc_dit_g, vsrc565_g); // sub shifted green from result
            vsrc_dit_b = vsubw_u8(vsrc_dit_b, vsrc565_b); // sub shifted blue from result

            vsrc_res_r = vshrq_n_u16(vsrc_dit_r, 3);
            vsrc_res_g = vshrq_n_u16(vsrc_dit_g, 2);
            vsrc_res_b = vshrq_n_u16(vsrc_dit_b, 3);

            // Load dst and unpack
            vdst = vld1q_u16(dst);
            vdst_g = vshrq_n_u16(vdst, 5);                   // shift down to get green
            vdst_r = vshrq_n_u16(vshlq_n_u16(vdst, 5), 5+5); // double shift to extract red
            vdst_b = vandq_u16(vdst, vmask_b);               // mask to get blue

            // subtract dst from src and widen
            vres_r = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_r), vreinterpretq_s16_u16(vdst_r));
            vres_g = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_g), vreinterpretq_s16_u16(vdst_g));
            vres_b = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_b), vreinterpretq_s16_u16(vdst_b));

            // multiply diffs by scale and shift
            vres_r = vmulq_s16(vres_r, vscale);
            vres_g = vmulq_s16(vres_g, vscale);
            vres_b = vmulq_s16(vres_b, vscale);

            vres8_r = vshrn_n_s16(vres_r, 8);
            vres8_g = vshrn_n_s16(vres_g, 8);
            vres8_b = vshrn_n_s16(vres_b, 8);

            // add dst to result
            vres_r = vaddw_s8(vreinterpretq_s16_u16(vdst_r), vres8_r);
            vres_g = vaddw_s8(vreinterpretq_s16_u16(vdst_g), vres8_g);
            vres_b = vaddw_s8(vreinterpretq_s16_u16(vdst_b), vres8_b);

            // put result into 565 format
            vres_b = vsliq_n_s16(vres_b, vres_g, 5);   // shift up green and insert into blue
            vres_b = vsliq_n_s16(vres_b, vres_r, 6+5); // shift up red and insert into blue

            // Store result
            vst1q_u16(dst, vreinterpretq_u16_s16(vres_b));

            // Next iteration
            dst += 8;
            count -= 8;

        } while (count >= 8);
    }

    // Leftovers
    if (count > 0) {
        int scale = SkAlpha255To256(alpha);
        DITHER_565_SCAN(y);
        do {
            SkPMColor c = *src++;
            SkPMColorAssert(c);

            int dither = DITHER_VALUE(x);
            int sr = SkGetPackedR32(c);
            int sg = SkGetPackedG32(c);
            int sb = SkGetPackedB32(c);
            sr = SkDITHER_R32To565(sr, dither);
            sg = SkDITHER_G32To565(sg, dither);
            sb = SkDITHER_B32To565(sb, dither);

            uint16_t d = *dst;
            *dst++ = SkPackRGB16(SkAlphaBlend(sr, SkGetPackedR16(d), scale),
                                 SkAlphaBlend(sg, SkGetPackedG16(d), scale),
                                 SkAlphaBlend(sb, SkGetPackedB16(d), scale));
            DITHER_INC_X(x);
        } while (--count != 0);
    }
}

void S32A_Opaque_BlitRow32_neon(SkPMColor* SK_RESTRICT dst,
                                const SkPMColor* SK_RESTRICT src,
                                int count, U8CPU alpha) {

    SkASSERT(255 == alpha);
    if (count > 0) {

        uint8x8_t alpha_mask;

        static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7};
        alpha_mask = vld1_u8(alpha_mask_setup);
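
        // Note on the table above (added for clarity): vtbl1_u8 with these
        // indices broadcasts byte 3 (the alpha of pixel 0) into lanes 0-3 and
        // byte 7 (the alpha of pixel 1) into lanes 4-7. That picks up the
        // alpha bytes when alpha occupies the top byte of each SkPMColor
        // (SK_A32_SHIFT == 24), the configuration Skia uses here.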

        /* do the NEON unrolled code */
#define UNROLL 4
        while (count >= UNROLL) {
            uint8x8_t src_raw, dst_raw, dst_final;
            uint8x8_t src_raw_2, dst_raw_2, dst_final_2;

            /* The two prefetches below may make the code slightly
             * slower for small values of count but are worth having
             * in the general case.
             */
            __builtin_prefetch(src+32);
            __builtin_prefetch(dst+32);

            /* get the source */
            src_raw = vreinterpret_u8_u32(vld1_u32(src));
#if UNROLL > 2
            src_raw_2 = vreinterpret_u8_u32(vld1_u32(src+2));
#endif

            /* get and hold the dst too */
            dst_raw = vreinterpret_u8_u32(vld1_u32(dst));
#if UNROLL > 2
            dst_raw_2 = vreinterpret_u8_u32(vld1_u32(dst+2));
#endif

            /* 1st and 2nd bits of the unrolling */
            {
                uint8x8_t dst_cooked;
                uint16x8_t dst_wide;
                uint8x8_t alpha_narrow;
                uint16x8_t alpha_wide;

                /* get the alphas spread out properly */
                alpha_narrow = vtbl1_u8(src_raw, alpha_mask);
                alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow);

                /* spread the dest */
                dst_wide = vmovl_u8(dst_raw);

                /* alpha mul the dest */
                dst_wide = vmulq_u16 (dst_wide, alpha_wide);
                dst_cooked = vshrn_n_u16(dst_wide, 8);

                /* sum -- ignoring any byte lane overflows */
                dst_final = vadd_u8(src_raw, dst_cooked);
            }

#if UNROLL > 2
            /* the 3rd and 4th bits of our unrolling */
            {
                uint8x8_t dst_cooked;
                uint16x8_t dst_wide;
                uint8x8_t alpha_narrow;
                uint16x8_t alpha_wide;

                alpha_narrow = vtbl1_u8(src_raw_2, alpha_mask);
                alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow);

                /* spread the dest */
                dst_wide = vmovl_u8(dst_raw_2);

                /* alpha mul the dest */
                dst_wide = vmulq_u16 (dst_wide, alpha_wide);
                dst_cooked = vshrn_n_u16(dst_wide, 8);

                /* sum -- ignoring any byte lane overflows */
                dst_final_2 = vadd_u8(src_raw_2, dst_cooked);
            }
#endif

            vst1_u32(dst, vreinterpret_u32_u8(dst_final));
#if UNROLL > 2
            vst1_u32(dst+2, vreinterpret_u32_u8(dst_final_2));
#endif

            src += UNROLL;
            dst += UNROLL;
            count -= UNROLL;
        }
#undef UNROLL

        /* do any residual iterations */
        while (--count >= 0) {
            *dst = SkPMSrcOver(*src, *dst);
            src += 1;
            dst += 1;
        }
    }
}

void S32A_Opaque_BlitRow32_neon_src_alpha(SkPMColor* SK_RESTRICT dst,
                                          const SkPMColor* SK_RESTRICT src,
                                          int count, U8CPU alpha) {
    SkASSERT(255 == alpha);

    if (count <= 0)
        return;

    /* Use these to check if src is transparent or opaque */
    const unsigned int ALPHA_OPAQ = 0xFF000000;
    const unsigned int ALPHA_TRANS = 0x00FFFFFF;

#define UNROLL 4
    const SkPMColor* SK_RESTRICT src_end = src + count - (UNROLL + 1);
    const SkPMColor* SK_RESTRICT src_temp = src;

    /* set up the NEON variables */
    uint8x8_t alpha_mask;
    static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7};
    alpha_mask = vld1_u8(alpha_mask_setup);

    uint8x8_t src_raw, dst_raw, dst_final;
    uint8x8_t src_raw_2, dst_raw_2, dst_final_2;
    uint8x8_t dst_cooked;
    uint16x8_t dst_wide;
    uint8x8_t alpha_narrow;
    uint16x8_t alpha_wide;

    /* choose the first processing type */
    if (src >= src_end)
        goto TAIL;
    if (*src <= ALPHA_TRANS)
        goto ALPHA_0;
    if (*src >= ALPHA_OPAQ)
        goto ALPHA_255;
    /* fall-thru */

ALPHA_1_TO_254:
    do {

        /* get the source */
        src_raw = vreinterpret_u8_u32(vld1_u32(src));
        src_raw_2 = vreinterpret_u8_u32(vld1_u32(src+2));

        /* get and hold the dst too */
        dst_raw = vreinterpret_u8_u32(vld1_u32(dst));
        dst_raw_2 = vreinterpret_u8_u32(vld1_u32(dst+2));

        /* get the alphas spread out properly */
        alpha_narrow = vtbl1_u8(src_raw, alpha_mask);
        /* reflect SkAlpha255To256() semantics a+1 vs a+a>>7 */
        /* we collapsed (255-a)+1 ... */
        alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow);

        /* spread the dest */
        dst_wide = vmovl_u8(dst_raw);

        /* alpha mul the dest */
        dst_wide = vmulq_u16 (dst_wide, alpha_wide);
        dst_cooked = vshrn_n_u16(dst_wide, 8);

        /* sum -- ignoring any byte lane overflows */
        dst_final = vadd_u8(src_raw, dst_cooked);

        alpha_narrow = vtbl1_u8(src_raw_2, alpha_mask);
        /* reflect SkAlpha255To256() semantics a+1 vs a+a>>7 */
        /* we collapsed (255-a)+1 ... */
        alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow);

        /* spread the dest */
        dst_wide = vmovl_u8(dst_raw_2);

        /* alpha mul the dest */
        dst_wide = vmulq_u16 (dst_wide, alpha_wide);
        dst_cooked = vshrn_n_u16(dst_wide, 8);

        /* sum -- ignoring any byte lane overflows */
        dst_final_2 = vadd_u8(src_raw_2, dst_cooked);

        vst1_u32(dst, vreinterpret_u32_u8(dst_final));
        vst1_u32(dst+2, vreinterpret_u32_u8(dst_final_2));

        src += UNROLL;
        dst += UNROLL;

        /* if 2 of the next pixels aren't between 1 and 254
           it might make sense to go to the optimized loops */
        if ((src[0] <= ALPHA_TRANS && src[1] <= ALPHA_TRANS) || (src[0] >= ALPHA_OPAQ && src[1] >= ALPHA_OPAQ))
            break;

    } while (src < src_end);

    if (src >= src_end)
        goto TAIL;

    if (src[0] >= ALPHA_OPAQ && src[1] >= ALPHA_OPAQ)
        goto ALPHA_255;

    /* fall-thru */

ALPHA_0:

    /* In this state, we know the current alpha is 0 and
       we optimize for the next alpha also being zero. */
    src_temp = src; // so we don't have to increment dst every time
    do {
        if (*(++src) > ALPHA_TRANS)
            break;
        if (*(++src) > ALPHA_TRANS)
            break;
        if (*(++src) > ALPHA_TRANS)
            break;
        if (*(++src) > ALPHA_TRANS)
            break;
    } while (src < src_end);

    dst += (src - src_temp);

    /* no longer alpha 0, so determine where to go next. */
    if (src >= src_end)
        goto TAIL;
    if (*src >= ALPHA_OPAQ)
        goto ALPHA_255;
    else
        goto ALPHA_1_TO_254;

ALPHA_255:
    while ((src[0] & src[1] & src[2] & src[3]) >= ALPHA_OPAQ) {
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        src += UNROLL;
        dst += UNROLL;
        if (src >= src_end)
            goto TAIL;
    }

    // Handle remainder.
    if (*src >= ALPHA_OPAQ) { *dst++ = *src++;
        if (*src >= ALPHA_OPAQ) { *dst++ = *src++;
            if (*src >= ALPHA_OPAQ) { *dst++ = *src++; }
        }
    }

    if (src >= src_end)
        goto TAIL;
    if (*src <= ALPHA_TRANS)
        goto ALPHA_0;
    else
        goto ALPHA_1_TO_254;

TAIL:
    /* do any residual iterations */
    src_end += UNROLL + 1; // go to the real end
    while (src != src_end) {
        if (*src != 0) {
            if (*src >= ALPHA_OPAQ) {
                *dst = *src;
            } else {
                *dst = SkPMSrcOver(*src, *dst);
            }
        }
        src++;
        dst++;
    }

#undef UNROLL
    return;
}

/* Neon version of S32_Blend_BlitRow32()
 * portable version is in src/core/SkBlitRow_D32.cpp
 */
void S32_Blend_BlitRow32_neon(SkPMColor* SK_RESTRICT dst,
                              const SkPMColor* SK_RESTRICT src,
                              int count, U8CPU alpha) {
    SkASSERT(alpha <= 255);

    if (count <= 0) {
        return;
    }

    uint16_t src_scale = SkAlpha255To256(alpha);
    uint16_t dst_scale = 256 - src_scale;
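
    // In scalar terms (noted for clarity), each pixel below computes, per
    // channel:  dst = ((src * src_scale) >> 8) + ((dst * dst_scale) >> 8)
    // with src_scale in 1..256, matching the portable S32_Blend_BlitRow32.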

    while (count >= 2) {
        uint8x8_t vsrc, vdst, vres;
        uint16x8_t vsrc_wide, vdst_wide;

        /* These commented prefetches are a big win for count
         * values > 64 on an A9 (Pandaboard) but hurt by 10% for count = 4.
         * They also hurt a little (<5%) on an A15
         */
        //__builtin_prefetch(src+32);
        //__builtin_prefetch(dst+32);

        // Load
        vsrc = vreinterpret_u8_u32(vld1_u32(src));
        vdst = vreinterpret_u8_u32(vld1_u32(dst));

        // Process src
        vsrc_wide = vmovl_u8(vsrc);
        vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));

        // Process dst
        vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));

        // Combine
        vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);

        // Store
        vst1_u32(dst, vreinterpret_u32_u8(vres));

        src += 2;
        dst += 2;
        count -= 2;
    }

    if (count == 1) {
        uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
        uint16x8_t vsrc_wide, vdst_wide;

        // Load
        vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
        vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));

        // Process
        vsrc_wide = vmovl_u8(vsrc);
        vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
        vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
        vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);

        // Store
        vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
    }
}

void S32A_Blend_BlitRow32_neon(SkPMColor* SK_RESTRICT dst,
                               const SkPMColor* SK_RESTRICT src,
                               int count, U8CPU alpha) {

    SkASSERT(255 >= alpha);

    if (count <= 0) {
        return;
    }

    unsigned alpha256 = SkAlpha255To256(alpha);

    // First deal with odd counts
    if (count & 1) {
        uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
        uint16x8_t vdst_wide, vsrc_wide;
        unsigned dst_scale;

        // Load
        vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
        vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));

        // Calc dst_scale
        dst_scale = vget_lane_u8(vsrc, 3);
        dst_scale *= alpha256;
        dst_scale >>= 8;
        dst_scale = 256 - dst_scale;

        // Process src
        vsrc_wide = vmovl_u8(vsrc);
        vsrc_wide = vmulq_n_u16(vsrc_wide, alpha256);

        // Process dst
        vdst_wide = vmovl_u8(vdst);
        vdst_wide = vmulq_n_u16(vdst_wide, dst_scale);

        // Combine
        vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);

        vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
        dst++;
        src++;
        count--;
    }

    if (count) {
        uint8x8_t alpha_mask;
        static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7};
        alpha_mask = vld1_u8(alpha_mask_setup);

        do {
            uint8x8_t vsrc, vdst, vres, vsrc_alphas;
            uint16x8_t vdst_wide, vsrc_wide, vsrc_scale, vdst_scale;

            __builtin_prefetch(src+32);
            __builtin_prefetch(dst+32);

            // Load
            vsrc = vreinterpret_u8_u32(vld1_u32(src));
            vdst = vreinterpret_u8_u32(vld1_u32(dst));

            // Prepare src_scale
            vsrc_scale = vdupq_n_u16(alpha256);

            // Calc dst_scale
            vsrc_alphas = vtbl1_u8(vsrc, alpha_mask);
            vdst_scale = vmovl_u8(vsrc_alphas);
            vdst_scale *= vsrc_scale;
            vdst_scale = vshrq_n_u16(vdst_scale, 8);
            vdst_scale = vsubq_u16(vdupq_n_u16(256), vdst_scale);

            // Process src
            vsrc_wide = vmovl_u8(vsrc);
            vsrc_wide *= vsrc_scale;

            // Process dst
            vdst_wide = vmovl_u8(vdst);
            vdst_wide *= vdst_scale;

            // Combine
            vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8);

            vst1_u32(dst, vreinterpret_u32_u8(vres));

            src += 2;
            dst += 2;
            count -= 2;
        } while (count);
    }
}

///////////////////////////////////////////////////////////////////////////////

#undef DEBUG_OPAQUE_DITHER

#if defined(DEBUG_OPAQUE_DITHER)
static void showme8(char *str, void *p, int len)
{
    static char buf[256];
    char tbuf[32];
    int i;
    char *pc = (char*) p;
    sprintf(buf, "%8s:", str);
    for (i = 0; i < len; i++) {
        sprintf(tbuf, " %02x", pc[i]);
        strcat(buf, tbuf);
    }
    SkDebugf("%s\n", buf);
}
static void showme16(char *str, void *p, int len)
{
    static char buf[256];
    char tbuf[32];
    int i;
    uint16_t *pc = (uint16_t*) p;
    sprintf(buf, "%8s:", str);
    len = (len / sizeof(uint16_t)); /* passed as bytes */
    for (i = 0; i < len; i++) {
        sprintf(tbuf, " %04x", pc[i]);
        strcat(buf, tbuf);
    }
    SkDebugf("%s\n", buf);
}
#endif

void S32A_D565_Opaque_Dither_neon(uint16_t* SK_RESTRICT dst,
                                  const SkPMColor* SK_RESTRICT src,
                                  int count, U8CPU alpha, int x, int y) {
    SkASSERT(255 == alpha);

#define UNROLL 8

    if (count >= UNROLL) {

#if defined(DEBUG_OPAQUE_DITHER)
        uint16_t tmpbuf[UNROLL];
        int td[UNROLL];
        int tdv[UNROLL];
        int ta[UNROLL];
        int tap[UNROLL];
        uint16_t in_dst[UNROLL];
        int offset = 0;
        int noisy = 0;
#endif

        uint8x8_t dbase;
        const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)];
        dbase = vld1_u8(dstart);

        do {
            uint8x8_t sr, sg, sb, sa, d;
            uint16x8_t dst8, scale8, alpha8;
            uint16x8_t dst_r, dst_g, dst_b;

#if defined(DEBUG_OPAQUE_DITHER)
            // calculate 8 elements worth into a temp buffer
            {
                int my_y = y;
                int my_x = x;
                SkPMColor* my_src = (SkPMColor*)src;
                uint16_t* my_dst = dst;
                int i;

                DITHER_565_SCAN(my_y);
                for (i = 0; i < UNROLL; i++) {
                    SkPMColor c = *my_src++;
                    SkPMColorAssert(c);
                    if (c) {
                        unsigned a = SkGetPackedA32(c);

                        int d = SkAlphaMul(DITHER_VALUE(my_x), SkAlpha255To256(a));
                        tdv[i] = DITHER_VALUE(my_x);
                        ta[i] = a;
                        tap[i] = SkAlpha255To256(a);
                        td[i] = d;

                        unsigned sr = SkGetPackedR32(c);
                        unsigned sg = SkGetPackedG32(c);
                        unsigned sb = SkGetPackedB32(c);
                        sr = SkDITHER_R32_FOR_565(sr, d);
                        sg = SkDITHER_G32_FOR_565(sg, d);
                        sb = SkDITHER_B32_FOR_565(sb, d);

                        uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
                        uint32_t dst_expanded = SkExpand_rgb_16(*my_dst);
                        dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
                        // now src and dst expanded are in g:11 r:10 x:1 b:10
                        tmpbuf[i] = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
                        td[i] = d;
                    } else {
                        tmpbuf[i] = *my_dst;
                        ta[i] = tdv[i] = td[i] = 0xbeef;
                    }
                    in_dst[i] = *my_dst;
                    my_dst += 1;
                    DITHER_INC_X(my_x);
                }
            }
#endif

            {
                register uint8x8_t d0 asm("d0");
                register uint8x8_t d1 asm("d1");
                register uint8x8_t d2 asm("d2");
                register uint8x8_t d3 asm("d3");

                asm ("vld4.8 {d0-d3},[%[src]]! /* r=%P0 g=%P1 b=%P2 a=%P3 */"
                     : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+r" (src)
                     :
                );
#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
                sr = d2; sg = d1; sb = d0; sa = d3;
#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
                sr = d0; sg = d1; sb = d2; sa = d3;
#endif
            }

            /* calculate 'd', which will be 0..7
             * dbase[] is 0..7; alpha is 0..256; 16 bits suffice
             */
            alpha8 = vmovl_u8(dbase);
            alpha8 = vmlal_u8(alpha8, sa, dbase);
            d = vshrn_n_u16(alpha8, 8); // narrowing too
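
            // (Added note) The three lines above compute, per lane,
            //   d = (dbase * (sa + 1)) >> 8
            // i.e. the vector form of SkAlphaMul(DITHER_VALUE(x),
            // SkAlpha255To256(a)) used by the scalar residual loop below.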

            // sr = sr - (sr>>5) + d
            /* watching for 8-bit overflow. d is 0..7; risky range of
             * sr is >248; and then (sr>>5) is 7 so it offsets 'd';
             * safe as long as we do ((sr-sr>>5) + d)
             */
            sr = vsub_u8(sr, vshr_n_u8(sr, 5));
            sr = vadd_u8(sr, d);

            // sb = sb - (sb>>5) + d
            sb = vsub_u8(sb, vshr_n_u8(sb, 5));
            sb = vadd_u8(sb, d);

            // sg = sg - (sg>>6) + d>>1; similar logic for overflows
            sg = vsub_u8(sg, vshr_n_u8(sg, 6));
            sg = vadd_u8(sg, vshr_n_u8(d, 1));

            // need to pick up 8 dst's -- at 16 bits each, 128 bits
            dst8 = vld1q_u16(dst);
            dst_b = vandq_u16(dst8, vdupq_n_u16(SK_B16_MASK));
            dst_g = vshrq_n_u16(vshlq_n_u16(dst8, SK_R16_BITS), SK_R16_BITS + SK_B16_BITS);
            dst_r = vshrq_n_u16(dst8, SK_R16_SHIFT); // clearing hi bits

            // blend
            scale8 = vsubw_u8(vdupq_n_u16(256), sa);

            // combine the addq and mul, save 3 insns
            scale8 = vshrq_n_u16(scale8, 3);
            dst_b = vmlaq_u16(vshll_n_u8(sb, 2), dst_b, scale8);
            dst_g = vmlaq_u16(vshll_n_u8(sg, 3), dst_g, scale8);
            dst_r = vmlaq_u16(vshll_n_u8(sr, 2), dst_r, scale8);

            // repack to store
            dst8 = vshrq_n_u16(dst_b, 5);
            dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dst_g, 5), 5);
            dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dst_r, 5), 11);

            vst1q_u16(dst, dst8);

#if defined(DEBUG_OPAQUE_DITHER)
            // verify my 8 elements match the temp buffer
            {
                int i, bad = 0;
                static int invocation;

                for (i = 0; i < UNROLL; i++) {
                    if (tmpbuf[i] != dst[i]) {
                        bad = 1;
                    }
                }
                if (bad) {
                    SkDebugf("BAD S32A_D565_Opaque_Dither_neon(); invocation %d offset %d\n",
                             invocation, offset);
                    SkDebugf("  alpha 0x%x\n", alpha);
                    for (i = 0; i < UNROLL; i++)
                        SkDebugf("%2d: %s %04x w %04x id %04x s %08x d %04x %04x %04x %04x\n",
                                 i, ((tmpbuf[i] != dst[i]) ? "BAD" : "got"), dst[i], tmpbuf[i],
                                 in_dst[i], src[i-8], td[i], tdv[i], tap[i], ta[i]);

                    showme16("alpha8", &alpha8, sizeof(alpha8));
                    showme16("scale8", &scale8, sizeof(scale8));
                    showme8("d", &d, sizeof(d));
                    showme16("dst8", &dst8, sizeof(dst8));
                    showme16("dst_b", &dst_b, sizeof(dst_b));
                    showme16("dst_g", &dst_g, sizeof(dst_g));
                    showme16("dst_r", &dst_r, sizeof(dst_r));
                    showme8("sb", &sb, sizeof(sb));
                    showme8("sg", &sg, sizeof(sg));
                    showme8("sr", &sr, sizeof(sr));

                    return;
                }
                offset += UNROLL;
                invocation++;
            }
#endif
            dst += UNROLL;
            count -= UNROLL;
            // skip x += UNROLL, since it's unchanged mod-4
        } while (count >= UNROLL);
    }
#undef UNROLL

    // residuals
    if (count > 0) {
        DITHER_565_SCAN(y);
        do {
            SkPMColor c = *src++;
            SkPMColorAssert(c);
            if (c) {
                unsigned a = SkGetPackedA32(c);

                // dither and alpha are just temporary variables used to work
                // around an ICE in debug.
                unsigned dither = DITHER_VALUE(x);
                unsigned alpha = SkAlpha255To256(a);
                int d = SkAlphaMul(dither, alpha);

                unsigned sr = SkGetPackedR32(c);
                unsigned sg = SkGetPackedG32(c);
                unsigned sb = SkGetPackedB32(c);
                sr = SkDITHER_R32_FOR_565(sr, d);
                sg = SkDITHER_G32_FOR_565(sg, d);
                sb = SkDITHER_B32_FOR_565(sb, d);

                uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2);
                uint32_t dst_expanded = SkExpand_rgb_16(*dst);
                dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3);
                // now src and dst expanded are in g:11 r:10 x:1 b:10
                *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5);
            }
            dst += 1;
            DITHER_INC_X(x);
        } while (--count != 0);
    }
}

///////////////////////////////////////////////////////////////////////////////

#undef DEBUG_S32_OPAQUE_DITHER

void S32_D565_Opaque_Dither_neon(uint16_t* SK_RESTRICT dst,
                                 const SkPMColor* SK_RESTRICT src,
                                 int count, U8CPU alpha, int x, int y) {
    SkASSERT(255 == alpha);

#define UNROLL 8
    if (count >= UNROLL) {
        uint8x8_t d;
        const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)];
        d = vld1_u8(dstart);

        while (count >= UNROLL) {
            uint8x8_t sr, sg, sb;
            uint16x8_t dr, dg, db;
            uint16x8_t dst8;

            {
                register uint8x8_t d0 asm("d0");
                register uint8x8_t d1 asm("d1");
                register uint8x8_t d2 asm("d2");
                register uint8x8_t d3 asm("d3");

                asm (
                    "vld4.8 {d0-d3},[%[src]]! /* r=%P0 g=%P1 b=%P2 a=%P3 */"
                    : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src)
                    :
                );
                sg = d1;
#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
                sr = d2; sb = d0;
#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
                sr = d0; sb = d2;
#endif
            }
            /* XXX: if we want to prefetch, hide it in the above asm()
             * using the gcc __builtin_prefetch(), the prefetch will
             * fall to the bottom of the loop -- it won't stick up
             * at the top of the loop, just after the vld4.
             */

            // sr = sr - (sr>>5) + d
            sr = vsub_u8(sr, vshr_n_u8(sr, 5));
            dr = vaddl_u8(sr, d);

            // sb = sb - (sb>>5) + d
            sb = vsub_u8(sb, vshr_n_u8(sb, 5));
            db = vaddl_u8(sb, d);

            // sg = sg - (sg>>6) + d>>1; similar logic for overflows
            sg = vsub_u8(sg, vshr_n_u8(sg, 6));
            dg = vaddl_u8(sg, vshr_n_u8(d, 1));

            // pack high bits of each into 565 format (rgb, b is lsb)
            dst8 = vshrq_n_u16(db, 3);
            dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dg, 2), 5);
            dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dr, 3), 11);

            // store it
            vst1q_u16(dst, dst8);

#if defined(DEBUG_S32_OPAQUE_DITHER)
            // always good to know if we generated good results
            {
                int i, myx = x, myy = y;
                DITHER_565_SCAN(myy);
                for (i = 0; i < UNROLL; i++) {
                    // the '!' in the asm block above post-incremented src by the 8 pixels it reads.
                    SkPMColor c = src[i-8];
                    unsigned dither = DITHER_VALUE(myx);
                    uint16_t val = SkDitherRGB32To565(c, dither);
                    if (val != dst[i]) {
                        SkDebugf("RBE: src %08x dither %02x, want %04x got %04x dbas[i] %02x\n",
                                 c, dither, val, dst[i], dstart[i]);
                    }
                    DITHER_INC_X(myx);
                }
            }
#endif

            dst += UNROLL;
            // we don't need to increment src as the asm above has already done it
            count -= UNROLL;
            x += UNROLL; // probably superfluous
        }
    }
#undef UNROLL

    // residuals
    if (count > 0) {
        DITHER_565_SCAN(y);
        do {
            SkPMColor c = *src++;
            SkPMColorAssert(c);
            SkASSERT(SkGetPackedA32(c) == 255);

            unsigned dither = DITHER_VALUE(x);
            *dst++ = SkDitherRGB32To565(c, dither);
            DITHER_INC_X(x);
        } while (--count != 0);
    }
}

void Color32_arm_neon(SkPMColor* dst, const SkPMColor* src, int count,
                      SkPMColor color) {
    if (count <= 0) {
        return;
    }

    if (0 == color) {
        if (src != dst) {
            memcpy(dst, src, count * sizeof(SkPMColor));
        }
        return;
    }

    unsigned colorA = SkGetPackedA32(color);
    if (255 == colorA) {
        sk_memset32(dst, color, count);
    } else {
        unsigned scale = 256 - SkAlpha255To256(colorA);

        if (count >= 8) {
            // at the end of this assembly, count will have been decremented
            // to a negative value. That is, if count mod 8 = x, it will be
            // -8 +x coming out.
            asm volatile (
                PLD128(src, 0)

                "vdup.32    q0, %[color]                \n\t"

                PLD128(src, 128)

                // scale numerical interval [0-255], so load as 8 bits
                "vdup.8     d2, %[scale]                \n\t"

                PLD128(src, 256)

                "subs       %[count], %[count], #8      \n\t"

                PLD128(src, 384)

                "Loop_Color32:                          \n\t"

                // load src color, 8 pixels, 4 64 bit registers
                // (and increment src).
                "vld1.32    {d4-d7}, [%[src]]!          \n\t"

                PLD128(src, 384)

                // multiply long by scale, 64 bits at a time,
                // destination into a 128 bit register.
                "vmull.u8   q4, d4, d2                  \n\t"
                "vmull.u8   q5, d5, d2                  \n\t"
                "vmull.u8   q6, d6, d2                  \n\t"
                "vmull.u8   q7, d7, d2                  \n\t"

                // shift the 128 bit registers, containing the 16
                // bit scaled values back to 8 bits, narrowing the
                // results to 64 bit registers.
                "vshrn.i16  d8, q4, #8                  \n\t"
                "vshrn.i16  d9, q5, #8                  \n\t"
                "vshrn.i16  d10, q6, #8                 \n\t"
                "vshrn.i16  d11, q7, #8                 \n\t"

                // adding back the color, using 128 bit registers.
                "vadd.i8    q6, q4, q0                  \n\t"
                "vadd.i8    q7, q5, q0                  \n\t"

                // store back the 8 calculated pixels (2 128 bit
                // registers), and increment dst.
                "vst1.32    {d12-d15}, [%[dst]]!        \n\t"

                "subs       %[count], %[count], #8      \n\t"
                "bge        Loop_Color32                \n\t"
                : [src] "+r" (src), [dst] "+r" (dst), [count] "+r" (count)
                : [color] "r" (color), [scale] "r" (scale)
                : "cc", "memory",
                  "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
                  "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
            );
            // At this point, if we went through the inline assembly, count is
            // a negative value:
            // if the value is -8, there is no pixel left to process.
            // if the value is -7, there is one pixel left to process
            // ...
            // And'ing it with 7 will give us the number of pixels
            // left to process.
            count = count & 0x7;
        }

        while (count > 0) {
            *dst = color + SkAlphaMulQ(*src, scale);
            src += 1;
            dst += 1;
            count--;
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm_neon[] = {
    // no dither
    // NOTE: For the S32_D565_Blend function below, we don't have a special
    //       version that assumes that each source pixel is opaque. But our
    //       S32A is still faster than the default, so use it.
    S32_D565_Opaque_neon,
    S32A_D565_Blend_neon,           // really S32_D565_Blend
    S32A_D565_Opaque_neon,
    S32A_D565_Blend_neon,

    // dither
    S32_D565_Opaque_Dither_neon,
    S32_D565_Blend_Dither_neon,
    S32A_D565_Opaque_Dither_neon,
    NULL,                           // S32A_D565_Blend_Dither
};
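
// (Orientation note, per the flag names in SkBlitRow.h) The table above is
// indexed by SkBlitRow's flags16 value: bit 0 means a global alpha is applied,
// bit 1 means the source has per-pixel alpha, and bit 2 requests dithering,
// hence the opaque/blend pairs repeated in no-dither then dither order.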

const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm_neon[] = {
    NULL,   // S32_Opaque,
    S32_Blend_BlitRow32_neon,   // S32_Blend,
    /*
     * We have two choices for S32A_Opaque procs. One reads the src alpha
     * value and attempts to optimize accordingly. The optimization is
     * sensitive to the source content and is not a win in all cases. For
     * example, if there are a lot of transitions between the alpha states,
     * the performance will almost certainly be worse. However, for many
     * common cases the performance is equivalent or better than the standard
     * case where we do not inspect the src alpha.
     */
#if SK_A32_SHIFT == 24
    // This proc assumes the alpha value occupies bits 24-31 of each SkPMColor
    S32A_Opaque_BlitRow32_neon_src_alpha,   // S32A_Opaque,
#else
    S32A_Opaque_BlitRow32_neon, // S32A_Opaque,
#endif
    S32A_Blend_BlitRow32_neon   // S32A_Blend
};
