michael@0: /* michael@0: * Copyright 2012 The Android Open Source Project michael@0: * michael@0: * Use of this source code is governed by a BSD-style license that can be michael@0: * found in the LICENSE file. michael@0: */ michael@0: michael@0: #include "SkBlitRow_opts_arm_neon.h" michael@0: michael@0: #include "SkBlitMask.h" michael@0: #include "SkBlitRow.h" michael@0: #include "SkColorPriv.h" michael@0: #include "SkDither.h" michael@0: #include "SkMathPriv.h" michael@0: #include "SkUtils.h" michael@0: michael@0: #include "SkCachePreload_arm.h" michael@0: #include "SkColor_opts_neon.h" michael@0: #include michael@0: michael@0: void S32_D565_Opaque_neon(uint16_t* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, int count, michael@0: U8CPU alpha, int /*x*/, int /*y*/) { michael@0: SkASSERT(255 == alpha); michael@0: michael@0: while (count >= 8) { michael@0: uint8x8x4_t vsrc; michael@0: uint16x8_t vdst; michael@0: michael@0: // Load michael@0: vsrc = vld4_u8((uint8_t*)src); michael@0: michael@0: // Convert src to 565 michael@0: vdst = SkPixel32ToPixel16_neon8(vsrc); michael@0: michael@0: // Store michael@0: vst1q_u16(dst, vdst); michael@0: michael@0: // Prepare next iteration michael@0: dst += 8; michael@0: src += 8; michael@0: count -= 8; michael@0: }; michael@0: michael@0: // Leftovers michael@0: while (count > 0) { michael@0: SkPMColor c = *src++; michael@0: SkPMColorAssert(c); michael@0: *dst = SkPixel32ToPixel16_ToU16(c); michael@0: dst++; michael@0: count--; michael@0: }; michael@0: } michael@0: michael@0: void S32A_D565_Opaque_neon(uint16_t* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, int count, michael@0: U8CPU alpha, int /*x*/, int /*y*/) { michael@0: SkASSERT(255 == alpha); michael@0: michael@0: if (count >= 8) { michael@0: uint16_t* SK_RESTRICT keep_dst = 0; michael@0: michael@0: asm volatile ( michael@0: "ands ip, %[count], #7 \n\t" michael@0: "vmov.u8 d31, #1<<7 \n\t" michael@0: "vld1.16 {q12}, [%[dst]] \n\t" michael@0: "vld4.8 {d0-d3}, [%[src]] \n\t" michael@0: // Thumb does not support the standard ARM conditional michael@0: // instructions but instead requires the 'it' instruction michael@0: // to signal conditional execution michael@0: "it eq \n\t" michael@0: "moveq ip, #8 \n\t" michael@0: "mov %[keep_dst], %[dst] \n\t" michael@0: michael@0: "add %[src], %[src], ip, LSL#2 \n\t" michael@0: "add %[dst], %[dst], ip, LSL#1 \n\t" michael@0: "subs %[count], %[count], ip \n\t" michael@0: "b 9f \n\t" michael@0: // LOOP michael@0: "2: \n\t" michael@0: michael@0: "vld1.16 {q12}, [%[dst]]! \n\t" michael@0: "vld4.8 {d0-d3}, [%[src]]! \n\t" michael@0: "vst1.16 {q10}, [%[keep_dst]] \n\t" michael@0: "sub %[keep_dst], %[dst], #8*2 \n\t" michael@0: "subs %[count], %[count], #8 \n\t" michael@0: "9: \n\t" michael@0: "pld [%[dst],#32] \n\t" michael@0: // expand 0565 q12 to 8888 {d4-d7} michael@0: "vmovn.u16 d4, q12 \n\t" michael@0: "vshr.u16 q11, q12, #5 \n\t" michael@0: "vshr.u16 q10, q12, #6+5 \n\t" michael@0: "vmovn.u16 d5, q11 \n\t" michael@0: "vmovn.u16 d6, q10 \n\t" michael@0: "vshl.u8 d4, d4, #3 \n\t" michael@0: "vshl.u8 d5, d5, #2 \n\t" michael@0: "vshl.u8 d6, d6, #3 \n\t" michael@0: michael@0: "vmovl.u8 q14, d31 \n\t" michael@0: "vmovl.u8 q13, d31 \n\t" michael@0: "vmovl.u8 q12, d31 \n\t" michael@0: michael@0: // duplicate in 4/2/1 & 8pix vsns michael@0: "vmvn.8 d30, d3 \n\t" michael@0: "vmlal.u8 q14, d30, d6 \n\t" michael@0: "vmlal.u8 q13, d30, d5 \n\t" michael@0: "vmlal.u8 q12, d30, d4 \n\t" michael@0: "vshr.u16 q8, q14, #5 \n\t" michael@0: "vshr.u16 q9, q13, #6 \n\t" michael@0: "vaddhn.u16 d6, q14, q8 \n\t" michael@0: "vshr.u16 q8, q12, #5 \n\t" michael@0: "vaddhn.u16 d5, q13, q9 \n\t" michael@0: "vqadd.u8 d6, d6, d0 \n\t" // moved up michael@0: "vaddhn.u16 d4, q12, q8 \n\t" michael@0: // intentionally don't calculate alpha michael@0: // result in d4-d6 michael@0: michael@0: "vqadd.u8 d5, d5, d1 \n\t" michael@0: "vqadd.u8 d4, d4, d2 \n\t" michael@0: michael@0: // pack 8888 {d4-d6} to 0565 q10 michael@0: "vshll.u8 q10, d6, #8 \n\t" michael@0: "vshll.u8 q3, d5, #8 \n\t" michael@0: "vshll.u8 q2, d4, #8 \n\t" michael@0: "vsri.u16 q10, q3, #5 \n\t" michael@0: "vsri.u16 q10, q2, #11 \n\t" michael@0: michael@0: "bne 2b \n\t" michael@0: michael@0: "1: \n\t" michael@0: "vst1.16 {q10}, [%[keep_dst]] \n\t" michael@0: : [count] "+r" (count) michael@0: : [dst] "r" (dst), [keep_dst] "r" (keep_dst), [src] "r" (src) michael@0: : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7", michael@0: "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29", michael@0: "d30","d31" michael@0: ); michael@0: } michael@0: else michael@0: { // handle count < 8 michael@0: uint16_t* SK_RESTRICT keep_dst = 0; michael@0: michael@0: asm volatile ( michael@0: "vmov.u8 d31, #1<<7 \n\t" michael@0: "mov %[keep_dst], %[dst] \n\t" michael@0: michael@0: "tst %[count], #4 \n\t" michael@0: "beq 14f \n\t" michael@0: "vld1.16 {d25}, [%[dst]]! \n\t" michael@0: "vld1.32 {q1}, [%[src]]! \n\t" michael@0: michael@0: "14: \n\t" michael@0: "tst %[count], #2 \n\t" michael@0: "beq 12f \n\t" michael@0: "vld1.32 {d24[1]}, [%[dst]]! \n\t" michael@0: "vld1.32 {d1}, [%[src]]! \n\t" michael@0: michael@0: "12: \n\t" michael@0: "tst %[count], #1 \n\t" michael@0: "beq 11f \n\t" michael@0: "vld1.16 {d24[1]}, [%[dst]]! \n\t" michael@0: "vld1.32 {d0[1]}, [%[src]]! \n\t" michael@0: michael@0: "11: \n\t" michael@0: // unzips achieve the same as a vld4 operation michael@0: "vuzpq.u16 q0, q1 \n\t" michael@0: "vuzp.u8 d0, d1 \n\t" michael@0: "vuzp.u8 d2, d3 \n\t" michael@0: // expand 0565 q12 to 8888 {d4-d7} michael@0: "vmovn.u16 d4, q12 \n\t" michael@0: "vshr.u16 q11, q12, #5 \n\t" michael@0: "vshr.u16 q10, q12, #6+5 \n\t" michael@0: "vmovn.u16 d5, q11 \n\t" michael@0: "vmovn.u16 d6, q10 \n\t" michael@0: "vshl.u8 d4, d4, #3 \n\t" michael@0: "vshl.u8 d5, d5, #2 \n\t" michael@0: "vshl.u8 d6, d6, #3 \n\t" michael@0: michael@0: "vmovl.u8 q14, d31 \n\t" michael@0: "vmovl.u8 q13, d31 \n\t" michael@0: "vmovl.u8 q12, d31 \n\t" michael@0: michael@0: // duplicate in 4/2/1 & 8pix vsns michael@0: "vmvn.8 d30, d3 \n\t" michael@0: "vmlal.u8 q14, d30, d6 \n\t" michael@0: "vmlal.u8 q13, d30, d5 \n\t" michael@0: "vmlal.u8 q12, d30, d4 \n\t" michael@0: "vshr.u16 q8, q14, #5 \n\t" michael@0: "vshr.u16 q9, q13, #6 \n\t" michael@0: "vaddhn.u16 d6, q14, q8 \n\t" michael@0: "vshr.u16 q8, q12, #5 \n\t" michael@0: "vaddhn.u16 d5, q13, q9 \n\t" michael@0: "vqadd.u8 d6, d6, d0 \n\t" // moved up michael@0: "vaddhn.u16 d4, q12, q8 \n\t" michael@0: // intentionally don't calculate alpha michael@0: // result in d4-d6 michael@0: michael@0: "vqadd.u8 d5, d5, d1 \n\t" michael@0: "vqadd.u8 d4, d4, d2 \n\t" michael@0: michael@0: // pack 8888 {d4-d6} to 0565 q10 michael@0: "vshll.u8 q10, d6, #8 \n\t" michael@0: "vshll.u8 q3, d5, #8 \n\t" michael@0: "vshll.u8 q2, d4, #8 \n\t" michael@0: "vsri.u16 q10, q3, #5 \n\t" michael@0: "vsri.u16 q10, q2, #11 \n\t" michael@0: michael@0: // store michael@0: "tst %[count], #4 \n\t" michael@0: "beq 24f \n\t" michael@0: "vst1.16 {d21}, [%[keep_dst]]! \n\t" michael@0: michael@0: "24: \n\t" michael@0: "tst %[count], #2 \n\t" michael@0: "beq 22f \n\t" michael@0: "vst1.32 {d20[1]}, [%[keep_dst]]! \n\t" michael@0: michael@0: "22: \n\t" michael@0: "tst %[count], #1 \n\t" michael@0: "beq 21f \n\t" michael@0: "vst1.16 {d20[1]}, [%[keep_dst]]! \n\t" michael@0: michael@0: "21: \n\t" michael@0: : [count] "+r" (count) michael@0: : [dst] "r" (dst), [keep_dst] "r" (keep_dst), [src] "r" (src) michael@0: : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7", michael@0: "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29", michael@0: "d30","d31" michael@0: ); michael@0: } michael@0: } michael@0: michael@0: static inline uint16x8_t SkDiv255Round_neon8(uint16x8_t prod) { michael@0: prod += vdupq_n_u16(128); michael@0: prod += vshrq_n_u16(prod, 8); michael@0: return vshrq_n_u16(prod, 8); michael@0: } michael@0: michael@0: void S32A_D565_Blend_neon(uint16_t* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, int count, michael@0: U8CPU alpha, int /*x*/, int /*y*/) { michael@0: SkASSERT(255 > alpha); michael@0: michael@0: /* This code implements a Neon version of S32A_D565_Blend. The results have michael@0: * a few mismatches compared to the original code. These mismatches never michael@0: * exceed 1. michael@0: */ michael@0: michael@0: if (count >= 8) { michael@0: uint16x8_t valpha_max, vmask_blue; michael@0: uint8x8_t valpha; michael@0: michael@0: // prepare constants michael@0: valpha_max = vmovq_n_u16(255); michael@0: valpha = vdup_n_u8(alpha); michael@0: vmask_blue = vmovq_n_u16(SK_B16_MASK); michael@0: michael@0: do { michael@0: uint16x8_t vdst, vdst_r, vdst_g, vdst_b; michael@0: uint16x8_t vres_a, vres_r, vres_g, vres_b; michael@0: uint8x8x4_t vsrc; michael@0: michael@0: // load pixels michael@0: vdst = vld1q_u16(dst); michael@0: #if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6)) michael@0: asm ( michael@0: "vld4.u8 %h[vsrc], [%[src]]!" michael@0: : [vsrc] "=w" (vsrc), [src] "+&r" (src) michael@0: : : michael@0: ); michael@0: #else michael@0: register uint8x8_t d0 asm("d0"); michael@0: register uint8x8_t d1 asm("d1"); michael@0: register uint8x8_t d2 asm("d2"); michael@0: register uint8x8_t d3 asm("d3"); michael@0: michael@0: asm volatile ( michael@0: "vld4.u8 {d0-d3},[%[src]]!;" michael@0: : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), michael@0: [src] "+&r" (src) michael@0: : : michael@0: ); michael@0: vsrc.val[0] = d0; michael@0: vsrc.val[1] = d1; michael@0: vsrc.val[2] = d2; michael@0: vsrc.val[3] = d3; michael@0: #endif michael@0: michael@0: michael@0: // deinterleave dst michael@0: vdst_g = vshlq_n_u16(vdst, SK_R16_BITS); // shift green to top of lanes michael@0: vdst_b = vdst & vmask_blue; // extract blue michael@0: vdst_r = vshrq_n_u16(vdst, SK_R16_SHIFT); // extract red michael@0: vdst_g = vshrq_n_u16(vdst_g, SK_R16_BITS + SK_B16_BITS); // extract green michael@0: michael@0: // shift src to 565 michael@0: vsrc.val[NEON_R] = vshr_n_u8(vsrc.val[NEON_R], 8 - SK_R16_BITS); michael@0: vsrc.val[NEON_G] = vshr_n_u8(vsrc.val[NEON_G], 8 - SK_G16_BITS); michael@0: vsrc.val[NEON_B] = vshr_n_u8(vsrc.val[NEON_B], 8 - SK_B16_BITS); michael@0: michael@0: // calc src * src_scale michael@0: vres_a = vmull_u8(vsrc.val[NEON_A], valpha); michael@0: vres_r = vmull_u8(vsrc.val[NEON_R], valpha); michael@0: vres_g = vmull_u8(vsrc.val[NEON_G], valpha); michael@0: vres_b = vmull_u8(vsrc.val[NEON_B], valpha); michael@0: michael@0: // prepare dst_scale michael@0: vres_a = SkDiv255Round_neon8(vres_a); michael@0: vres_a = valpha_max - vres_a; // 255 - (sa * src_scale) / 255 michael@0: michael@0: // add dst * dst_scale to previous result michael@0: vres_r = vmlaq_u16(vres_r, vdst_r, vres_a); michael@0: vres_g = vmlaq_u16(vres_g, vdst_g, vres_a); michael@0: vres_b = vmlaq_u16(vres_b, vdst_b, vres_a); michael@0: michael@0: #ifdef S32A_D565_BLEND_EXACT michael@0: // It is possible to get exact results with this but it is slow, michael@0: // even slower than C code in some cases michael@0: vres_r = SkDiv255Round_neon8(vres_r); michael@0: vres_g = SkDiv255Round_neon8(vres_g); michael@0: vres_b = SkDiv255Round_neon8(vres_b); michael@0: #else michael@0: vres_r = vrshrq_n_u16(vres_r, 8); michael@0: vres_g = vrshrq_n_u16(vres_g, 8); michael@0: vres_b = vrshrq_n_u16(vres_b, 8); michael@0: #endif michael@0: // pack result michael@0: vres_b = vsliq_n_u16(vres_b, vres_g, SK_G16_SHIFT); // insert green into blue michael@0: vres_b = vsliq_n_u16(vres_b, vres_r, SK_R16_SHIFT); // insert red into green/blue michael@0: michael@0: // store michael@0: vst1q_u16(dst, vres_b); michael@0: dst += 8; michael@0: count -= 8; michael@0: } while (count >= 8); michael@0: } michael@0: michael@0: // leftovers michael@0: while (count-- > 0) { michael@0: SkPMColor sc = *src++; michael@0: if (sc) { michael@0: uint16_t dc = *dst; michael@0: unsigned dst_scale = 255 - SkMulDiv255Round(SkGetPackedA32(sc), alpha); michael@0: unsigned dr = SkMulS16(SkPacked32ToR16(sc), alpha) + SkMulS16(SkGetPackedR16(dc), dst_scale); michael@0: unsigned dg = SkMulS16(SkPacked32ToG16(sc), alpha) + SkMulS16(SkGetPackedG16(dc), dst_scale); michael@0: unsigned db = SkMulS16(SkPacked32ToB16(sc), alpha) + SkMulS16(SkGetPackedB16(dc), dst_scale); michael@0: *dst = SkPackRGB16(SkDiv255Round(dr), SkDiv255Round(dg), SkDiv255Round(db)); michael@0: } michael@0: dst += 1; michael@0: } michael@0: } michael@0: michael@0: /* dither matrix for Neon, derived from gDitherMatrix_3Bit_16. michael@0: * each dither value is spaced out into byte lanes, and repeated michael@0: * to allow an 8-byte load from offsets 0, 1, 2 or 3 from the michael@0: * start of each row. michael@0: */ michael@0: static const uint8_t gDitherMatrix_Neon[48] = { michael@0: 0, 4, 1, 5, 0, 4, 1, 5, 0, 4, 1, 5, michael@0: 6, 2, 7, 3, 6, 2, 7, 3, 6, 2, 7, 3, michael@0: 1, 5, 0, 4, 1, 5, 0, 4, 1, 5, 0, 4, michael@0: 7, 3, 6, 2, 7, 3, 6, 2, 7, 3, 6, 2, michael@0: michael@0: }; michael@0: michael@0: void S32_D565_Blend_Dither_neon(uint16_t *dst, const SkPMColor *src, michael@0: int count, U8CPU alpha, int x, int y) michael@0: { michael@0: michael@0: SkASSERT(255 > alpha); michael@0: michael@0: // rescale alpha to range 1 - 256 michael@0: int scale = SkAlpha255To256(alpha); michael@0: michael@0: if (count >= 8) { michael@0: /* select row and offset for dither array */ michael@0: const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)]; michael@0: michael@0: uint8x8_t vdither = vld1_u8(dstart); // load dither values michael@0: uint8x8_t vdither_g = vshr_n_u8(vdither, 1); // calc. green dither values michael@0: michael@0: int16x8_t vscale = vdupq_n_s16(scale); // duplicate scale into neon reg michael@0: uint16x8_t vmask_b = vdupq_n_u16(0x1F); // set up blue mask michael@0: michael@0: do { michael@0: michael@0: uint8x8_t vsrc_r, vsrc_g, vsrc_b; michael@0: uint8x8_t vsrc565_r, vsrc565_g, vsrc565_b; michael@0: uint16x8_t vsrc_dit_r, vsrc_dit_g, vsrc_dit_b; michael@0: uint16x8_t vsrc_res_r, vsrc_res_g, vsrc_res_b; michael@0: uint16x8_t vdst; michael@0: uint16x8_t vdst_r, vdst_g, vdst_b; michael@0: int16x8_t vres_r, vres_g, vres_b; michael@0: int8x8_t vres8_r, vres8_g, vres8_b; michael@0: michael@0: // Load source and add dither michael@0: { michael@0: register uint8x8_t d0 asm("d0"); michael@0: register uint8x8_t d1 asm("d1"); michael@0: register uint8x8_t d2 asm("d2"); michael@0: register uint8x8_t d3 asm("d3"); michael@0: michael@0: asm ( michael@0: "vld4.8 {d0-d3},[%[src]]! /* r=%P0 g=%P1 b=%P2 a=%P3 */" michael@0: : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src) michael@0: : michael@0: ); michael@0: vsrc_g = d1; michael@0: #if SK_PMCOLOR_BYTE_ORDER(B,G,R,A) michael@0: vsrc_r = d2; vsrc_b = d0; michael@0: #elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A) michael@0: vsrc_r = d0; vsrc_b = d2; michael@0: #endif michael@0: } michael@0: michael@0: vsrc565_g = vshr_n_u8(vsrc_g, 6); // calc. green >> 6 michael@0: vsrc565_r = vshr_n_u8(vsrc_r, 5); // calc. red >> 5 michael@0: vsrc565_b = vshr_n_u8(vsrc_b, 5); // calc. blue >> 5 michael@0: michael@0: vsrc_dit_g = vaddl_u8(vsrc_g, vdither_g); // add in dither to green and widen michael@0: vsrc_dit_r = vaddl_u8(vsrc_r, vdither); // add in dither to red and widen michael@0: vsrc_dit_b = vaddl_u8(vsrc_b, vdither); // add in dither to blue and widen michael@0: michael@0: vsrc_dit_r = vsubw_u8(vsrc_dit_r, vsrc565_r); // sub shifted red from result michael@0: vsrc_dit_g = vsubw_u8(vsrc_dit_g, vsrc565_g); // sub shifted green from result michael@0: vsrc_dit_b = vsubw_u8(vsrc_dit_b, vsrc565_b); // sub shifted blue from result michael@0: michael@0: vsrc_res_r = vshrq_n_u16(vsrc_dit_r, 3); michael@0: vsrc_res_g = vshrq_n_u16(vsrc_dit_g, 2); michael@0: vsrc_res_b = vshrq_n_u16(vsrc_dit_b, 3); michael@0: michael@0: // Load dst and unpack michael@0: vdst = vld1q_u16(dst); michael@0: vdst_g = vshrq_n_u16(vdst, 5); // shift down to get green michael@0: vdst_r = vshrq_n_u16(vshlq_n_u16(vdst, 5), 5+5); // double shift to extract red michael@0: vdst_b = vandq_u16(vdst, vmask_b); // mask to get blue michael@0: michael@0: // subtract dst from src and widen michael@0: vres_r = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_r), vreinterpretq_s16_u16(vdst_r)); michael@0: vres_g = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_g), vreinterpretq_s16_u16(vdst_g)); michael@0: vres_b = vsubq_s16(vreinterpretq_s16_u16(vsrc_res_b), vreinterpretq_s16_u16(vdst_b)); michael@0: michael@0: // multiply diffs by scale and shift michael@0: vres_r = vmulq_s16(vres_r, vscale); michael@0: vres_g = vmulq_s16(vres_g, vscale); michael@0: vres_b = vmulq_s16(vres_b, vscale); michael@0: michael@0: vres8_r = vshrn_n_s16(vres_r, 8); michael@0: vres8_g = vshrn_n_s16(vres_g, 8); michael@0: vres8_b = vshrn_n_s16(vres_b, 8); michael@0: michael@0: // add dst to result michael@0: vres_r = vaddw_s8(vreinterpretq_s16_u16(vdst_r), vres8_r); michael@0: vres_g = vaddw_s8(vreinterpretq_s16_u16(vdst_g), vres8_g); michael@0: vres_b = vaddw_s8(vreinterpretq_s16_u16(vdst_b), vres8_b); michael@0: michael@0: // put result into 565 format michael@0: vres_b = vsliq_n_s16(vres_b, vres_g, 5); // shift up green and insert into blue michael@0: vres_b = vsliq_n_s16(vres_b, vres_r, 6+5); // shift up red and insert into blue michael@0: michael@0: // Store result michael@0: vst1q_u16(dst, vreinterpretq_u16_s16(vres_b)); michael@0: michael@0: // Next iteration michael@0: dst += 8; michael@0: count -= 8; michael@0: michael@0: } while (count >= 8); michael@0: } michael@0: michael@0: // Leftovers michael@0: if (count > 0) { michael@0: int scale = SkAlpha255To256(alpha); michael@0: DITHER_565_SCAN(y); michael@0: do { michael@0: SkPMColor c = *src++; michael@0: SkPMColorAssert(c); michael@0: michael@0: int dither = DITHER_VALUE(x); michael@0: int sr = SkGetPackedR32(c); michael@0: int sg = SkGetPackedG32(c); michael@0: int sb = SkGetPackedB32(c); michael@0: sr = SkDITHER_R32To565(sr, dither); michael@0: sg = SkDITHER_G32To565(sg, dither); michael@0: sb = SkDITHER_B32To565(sb, dither); michael@0: michael@0: uint16_t d = *dst; michael@0: *dst++ = SkPackRGB16(SkAlphaBlend(sr, SkGetPackedR16(d), scale), michael@0: SkAlphaBlend(sg, SkGetPackedG16(d), scale), michael@0: SkAlphaBlend(sb, SkGetPackedB16(d), scale)); michael@0: DITHER_INC_X(x); michael@0: } while (--count != 0); michael@0: } michael@0: } michael@0: michael@0: void S32A_Opaque_BlitRow32_neon(SkPMColor* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha) { michael@0: michael@0: SkASSERT(255 == alpha); michael@0: if (count > 0) { michael@0: michael@0: michael@0: uint8x8_t alpha_mask; michael@0: michael@0: static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7}; michael@0: alpha_mask = vld1_u8(alpha_mask_setup); michael@0: michael@0: /* do the NEON unrolled code */ michael@0: #define UNROLL 4 michael@0: while (count >= UNROLL) { michael@0: uint8x8_t src_raw, dst_raw, dst_final; michael@0: uint8x8_t src_raw_2, dst_raw_2, dst_final_2; michael@0: michael@0: /* The two prefetches below may make the code slighlty michael@0: * slower for small values of count but are worth having michael@0: * in the general case. michael@0: */ michael@0: __builtin_prefetch(src+32); michael@0: __builtin_prefetch(dst+32); michael@0: michael@0: /* get the source */ michael@0: src_raw = vreinterpret_u8_u32(vld1_u32(src)); michael@0: #if UNROLL > 2 michael@0: src_raw_2 = vreinterpret_u8_u32(vld1_u32(src+2)); michael@0: #endif michael@0: michael@0: /* get and hold the dst too */ michael@0: dst_raw = vreinterpret_u8_u32(vld1_u32(dst)); michael@0: #if UNROLL > 2 michael@0: dst_raw_2 = vreinterpret_u8_u32(vld1_u32(dst+2)); michael@0: #endif michael@0: michael@0: /* 1st and 2nd bits of the unrolling */ michael@0: { michael@0: uint8x8_t dst_cooked; michael@0: uint16x8_t dst_wide; michael@0: uint8x8_t alpha_narrow; michael@0: uint16x8_t alpha_wide; michael@0: michael@0: /* get the alphas spread out properly */ michael@0: alpha_narrow = vtbl1_u8(src_raw, alpha_mask); michael@0: alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow); michael@0: michael@0: /* spread the dest */ michael@0: dst_wide = vmovl_u8(dst_raw); michael@0: michael@0: /* alpha mul the dest */ michael@0: dst_wide = vmulq_u16 (dst_wide, alpha_wide); michael@0: dst_cooked = vshrn_n_u16(dst_wide, 8); michael@0: michael@0: /* sum -- ignoring any byte lane overflows */ michael@0: dst_final = vadd_u8(src_raw, dst_cooked); michael@0: } michael@0: michael@0: #if UNROLL > 2 michael@0: /* the 3rd and 4th bits of our unrolling */ michael@0: { michael@0: uint8x8_t dst_cooked; michael@0: uint16x8_t dst_wide; michael@0: uint8x8_t alpha_narrow; michael@0: uint16x8_t alpha_wide; michael@0: michael@0: alpha_narrow = vtbl1_u8(src_raw_2, alpha_mask); michael@0: alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow); michael@0: michael@0: /* spread the dest */ michael@0: dst_wide = vmovl_u8(dst_raw_2); michael@0: michael@0: /* alpha mul the dest */ michael@0: dst_wide = vmulq_u16 (dst_wide, alpha_wide); michael@0: dst_cooked = vshrn_n_u16(dst_wide, 8); michael@0: michael@0: /* sum -- ignoring any byte lane overflows */ michael@0: dst_final_2 = vadd_u8(src_raw_2, dst_cooked); michael@0: } michael@0: #endif michael@0: michael@0: vst1_u32(dst, vreinterpret_u32_u8(dst_final)); michael@0: #if UNROLL > 2 michael@0: vst1_u32(dst+2, vreinterpret_u32_u8(dst_final_2)); michael@0: #endif michael@0: michael@0: src += UNROLL; michael@0: dst += UNROLL; michael@0: count -= UNROLL; michael@0: } michael@0: #undef UNROLL michael@0: michael@0: /* do any residual iterations */ michael@0: while (--count >= 0) { michael@0: *dst = SkPMSrcOver(*src, *dst); michael@0: src += 1; michael@0: dst += 1; michael@0: } michael@0: } michael@0: } michael@0: michael@0: void S32A_Opaque_BlitRow32_neon_src_alpha(SkPMColor* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha) { michael@0: SkASSERT(255 == alpha); michael@0: michael@0: if (count <= 0) michael@0: return; michael@0: michael@0: /* Use these to check if src is transparent or opaque */ michael@0: const unsigned int ALPHA_OPAQ = 0xFF000000; michael@0: const unsigned int ALPHA_TRANS = 0x00FFFFFF; michael@0: michael@0: #define UNROLL 4 michael@0: const SkPMColor* SK_RESTRICT src_end = src + count - (UNROLL + 1); michael@0: const SkPMColor* SK_RESTRICT src_temp = src; michael@0: michael@0: /* set up the NEON variables */ michael@0: uint8x8_t alpha_mask; michael@0: static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7}; michael@0: alpha_mask = vld1_u8(alpha_mask_setup); michael@0: michael@0: uint8x8_t src_raw, dst_raw, dst_final; michael@0: uint8x8_t src_raw_2, dst_raw_2, dst_final_2; michael@0: uint8x8_t dst_cooked; michael@0: uint16x8_t dst_wide; michael@0: uint8x8_t alpha_narrow; michael@0: uint16x8_t alpha_wide; michael@0: michael@0: /* choose the first processing type */ michael@0: if( src >= src_end) michael@0: goto TAIL; michael@0: if(*src <= ALPHA_TRANS) michael@0: goto ALPHA_0; michael@0: if(*src >= ALPHA_OPAQ) michael@0: goto ALPHA_255; michael@0: /* fall-thru */ michael@0: michael@0: ALPHA_1_TO_254: michael@0: do { michael@0: michael@0: /* get the source */ michael@0: src_raw = vreinterpret_u8_u32(vld1_u32(src)); michael@0: src_raw_2 = vreinterpret_u8_u32(vld1_u32(src+2)); michael@0: michael@0: /* get and hold the dst too */ michael@0: dst_raw = vreinterpret_u8_u32(vld1_u32(dst)); michael@0: dst_raw_2 = vreinterpret_u8_u32(vld1_u32(dst+2)); michael@0: michael@0: michael@0: /* get the alphas spread out properly */ michael@0: alpha_narrow = vtbl1_u8(src_raw, alpha_mask); michael@0: /* reflect SkAlpha255To256() semantics a+1 vs a+a>>7 */ michael@0: /* we collapsed (255-a)+1 ... */ michael@0: alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow); michael@0: michael@0: /* spread the dest */ michael@0: dst_wide = vmovl_u8(dst_raw); michael@0: michael@0: /* alpha mul the dest */ michael@0: dst_wide = vmulq_u16 (dst_wide, alpha_wide); michael@0: dst_cooked = vshrn_n_u16(dst_wide, 8); michael@0: michael@0: /* sum -- ignoring any byte lane overflows */ michael@0: dst_final = vadd_u8(src_raw, dst_cooked); michael@0: michael@0: alpha_narrow = vtbl1_u8(src_raw_2, alpha_mask); michael@0: /* reflect SkAlpha255To256() semantics a+1 vs a+a>>7 */ michael@0: /* we collapsed (255-a)+1 ... */ michael@0: alpha_wide = vsubw_u8(vdupq_n_u16(256), alpha_narrow); michael@0: michael@0: /* spread the dest */ michael@0: dst_wide = vmovl_u8(dst_raw_2); michael@0: michael@0: /* alpha mul the dest */ michael@0: dst_wide = vmulq_u16 (dst_wide, alpha_wide); michael@0: dst_cooked = vshrn_n_u16(dst_wide, 8); michael@0: michael@0: /* sum -- ignoring any byte lane overflows */ michael@0: dst_final_2 = vadd_u8(src_raw_2, dst_cooked); michael@0: michael@0: vst1_u32(dst, vreinterpret_u32_u8(dst_final)); michael@0: vst1_u32(dst+2, vreinterpret_u32_u8(dst_final_2)); michael@0: michael@0: src += UNROLL; michael@0: dst += UNROLL; michael@0: michael@0: /* if 2 of the next pixels aren't between 1 and 254 michael@0: it might make sense to go to the optimized loops */ michael@0: if((src[0] <= ALPHA_TRANS && src[1] <= ALPHA_TRANS) || (src[0] >= ALPHA_OPAQ && src[1] >= ALPHA_OPAQ)) michael@0: break; michael@0: michael@0: } while(src < src_end); michael@0: michael@0: if (src >= src_end) michael@0: goto TAIL; michael@0: michael@0: if(src[0] >= ALPHA_OPAQ && src[1] >= ALPHA_OPAQ) michael@0: goto ALPHA_255; michael@0: michael@0: /*fall-thru*/ michael@0: michael@0: ALPHA_0: michael@0: michael@0: /*In this state, we know the current alpha is 0 and michael@0: we optimize for the next alpha also being zero. */ michael@0: src_temp = src; //so we don't have to increment dst every time michael@0: do { michael@0: if(*(++src) > ALPHA_TRANS) michael@0: break; michael@0: if(*(++src) > ALPHA_TRANS) michael@0: break; michael@0: if(*(++src) > ALPHA_TRANS) michael@0: break; michael@0: if(*(++src) > ALPHA_TRANS) michael@0: break; michael@0: } while(src < src_end); michael@0: michael@0: dst += (src - src_temp); michael@0: michael@0: /* no longer alpha 0, so determine where to go next. */ michael@0: if( src >= src_end) michael@0: goto TAIL; michael@0: if(*src >= ALPHA_OPAQ) michael@0: goto ALPHA_255; michael@0: else michael@0: goto ALPHA_1_TO_254; michael@0: michael@0: ALPHA_255: michael@0: while((src[0] & src[1] & src[2] & src[3]) >= ALPHA_OPAQ) { michael@0: dst[0]=src[0]; michael@0: dst[1]=src[1]; michael@0: dst[2]=src[2]; michael@0: dst[3]=src[3]; michael@0: src+=UNROLL; michael@0: dst+=UNROLL; michael@0: if(src >= src_end) michael@0: goto TAIL; michael@0: } michael@0: michael@0: //Handle remainder. michael@0: if(*src >= ALPHA_OPAQ) { *dst++ = *src++; michael@0: if(*src >= ALPHA_OPAQ) { *dst++ = *src++; michael@0: if(*src >= ALPHA_OPAQ) { *dst++ = *src++; } michael@0: } michael@0: } michael@0: michael@0: if( src >= src_end) michael@0: goto TAIL; michael@0: if(*src <= ALPHA_TRANS) michael@0: goto ALPHA_0; michael@0: else michael@0: goto ALPHA_1_TO_254; michael@0: michael@0: TAIL: michael@0: /* do any residual iterations */ michael@0: src_end += UNROLL + 1; //goto the real end michael@0: while(src != src_end) { michael@0: if( *src != 0 ) { michael@0: if( *src >= ALPHA_OPAQ ) { michael@0: *dst = *src; michael@0: } michael@0: else { michael@0: *dst = SkPMSrcOver(*src, *dst); michael@0: } michael@0: } michael@0: src++; michael@0: dst++; michael@0: } michael@0: michael@0: #undef UNROLL michael@0: return; michael@0: } michael@0: michael@0: /* Neon version of S32_Blend_BlitRow32() michael@0: * portable version is in src/core/SkBlitRow_D32.cpp michael@0: */ michael@0: void S32_Blend_BlitRow32_neon(SkPMColor* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha) { michael@0: SkASSERT(alpha <= 255); michael@0: michael@0: if (count <= 0) { michael@0: return; michael@0: } michael@0: michael@0: uint16_t src_scale = SkAlpha255To256(alpha); michael@0: uint16_t dst_scale = 256 - src_scale; michael@0: michael@0: while (count >= 2) { michael@0: uint8x8_t vsrc, vdst, vres; michael@0: uint16x8_t vsrc_wide, vdst_wide; michael@0: michael@0: /* These commented prefetches are a big win for count michael@0: * values > 64 on an A9 (Pandaboard) but hurt by 10% for count = 4. michael@0: * They also hurt a little (<5%) on an A15 michael@0: */ michael@0: //__builtin_prefetch(src+32); michael@0: //__builtin_prefetch(dst+32); michael@0: michael@0: // Load michael@0: vsrc = vreinterpret_u8_u32(vld1_u32(src)); michael@0: vdst = vreinterpret_u8_u32(vld1_u32(dst)); michael@0: michael@0: // Process src michael@0: vsrc_wide = vmovl_u8(vsrc); michael@0: vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale)); michael@0: michael@0: // Process dst michael@0: vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale)); michael@0: michael@0: // Combine michael@0: vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8); michael@0: michael@0: // Store michael@0: vst1_u32(dst, vreinterpret_u32_u8(vres)); michael@0: michael@0: src += 2; michael@0: dst += 2; michael@0: count -= 2; michael@0: } michael@0: michael@0: if (count == 1) { michael@0: uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres; michael@0: uint16x8_t vsrc_wide, vdst_wide; michael@0: michael@0: // Load michael@0: vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0)); michael@0: vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0)); michael@0: michael@0: // Process michael@0: vsrc_wide = vmovl_u8(vsrc); michael@0: vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale)); michael@0: vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale)); michael@0: vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8); michael@0: michael@0: // Store michael@0: vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); michael@0: } michael@0: } michael@0: michael@0: void S32A_Blend_BlitRow32_neon(SkPMColor* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha) { michael@0: michael@0: SkASSERT(255 >= alpha); michael@0: michael@0: if (count <= 0) { michael@0: return; michael@0: } michael@0: michael@0: unsigned alpha256 = SkAlpha255To256(alpha); michael@0: michael@0: // First deal with odd counts michael@0: if (count & 1) { michael@0: uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres; michael@0: uint16x8_t vdst_wide, vsrc_wide; michael@0: unsigned dst_scale; michael@0: michael@0: // Load michael@0: vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0)); michael@0: vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0)); michael@0: michael@0: // Calc dst_scale michael@0: dst_scale = vget_lane_u8(vsrc, 3); michael@0: dst_scale *= alpha256; michael@0: dst_scale >>= 8; michael@0: dst_scale = 256 - dst_scale; michael@0: michael@0: // Process src michael@0: vsrc_wide = vmovl_u8(vsrc); michael@0: vsrc_wide = vmulq_n_u16(vsrc_wide, alpha256); michael@0: michael@0: // Process dst michael@0: vdst_wide = vmovl_u8(vdst); michael@0: vdst_wide = vmulq_n_u16(vdst_wide, dst_scale); michael@0: michael@0: // Combine michael@0: vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8); michael@0: michael@0: vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0); michael@0: dst++; michael@0: src++; michael@0: count--; michael@0: } michael@0: michael@0: if (count) { michael@0: uint8x8_t alpha_mask; michael@0: static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7}; michael@0: alpha_mask = vld1_u8(alpha_mask_setup); michael@0: michael@0: do { michael@0: michael@0: uint8x8_t vsrc, vdst, vres, vsrc_alphas; michael@0: uint16x8_t vdst_wide, vsrc_wide, vsrc_scale, vdst_scale; michael@0: michael@0: __builtin_prefetch(src+32); michael@0: __builtin_prefetch(dst+32); michael@0: michael@0: // Load michael@0: vsrc = vreinterpret_u8_u32(vld1_u32(src)); michael@0: vdst = vreinterpret_u8_u32(vld1_u32(dst)); michael@0: michael@0: // Prepare src_scale michael@0: vsrc_scale = vdupq_n_u16(alpha256); michael@0: michael@0: // Calc dst_scale michael@0: vsrc_alphas = vtbl1_u8(vsrc, alpha_mask); michael@0: vdst_scale = vmovl_u8(vsrc_alphas); michael@0: vdst_scale *= vsrc_scale; michael@0: vdst_scale = vshrq_n_u16(vdst_scale, 8); michael@0: vdst_scale = vsubq_u16(vdupq_n_u16(256), vdst_scale); michael@0: michael@0: // Process src michael@0: vsrc_wide = vmovl_u8(vsrc); michael@0: vsrc_wide *= vsrc_scale; michael@0: michael@0: // Process dst michael@0: vdst_wide = vmovl_u8(vdst); michael@0: vdst_wide *= vdst_scale; michael@0: michael@0: // Combine michael@0: vres = vshrn_n_u16(vdst_wide, 8) + vshrn_n_u16(vsrc_wide, 8); michael@0: michael@0: vst1_u32(dst, vreinterpret_u32_u8(vres)); michael@0: michael@0: src += 2; michael@0: dst += 2; michael@0: count -= 2; michael@0: } while(count); michael@0: } michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: #undef DEBUG_OPAQUE_DITHER michael@0: michael@0: #if defined(DEBUG_OPAQUE_DITHER) michael@0: static void showme8(char *str, void *p, int len) michael@0: { michael@0: static char buf[256]; michael@0: char tbuf[32]; michael@0: int i; michael@0: char *pc = (char*) p; michael@0: sprintf(buf,"%8s:", str); michael@0: for(i=0;i= UNROLL) { michael@0: michael@0: #if defined(DEBUG_OPAQUE_DITHER) michael@0: uint16_t tmpbuf[UNROLL]; michael@0: int td[UNROLL]; michael@0: int tdv[UNROLL]; michael@0: int ta[UNROLL]; michael@0: int tap[UNROLL]; michael@0: uint16_t in_dst[UNROLL]; michael@0: int offset = 0; michael@0: int noisy = 0; michael@0: #endif michael@0: michael@0: uint8x8_t dbase; michael@0: const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)]; michael@0: dbase = vld1_u8(dstart); michael@0: michael@0: do { michael@0: uint8x8_t sr, sg, sb, sa, d; michael@0: uint16x8_t dst8, scale8, alpha8; michael@0: uint16x8_t dst_r, dst_g, dst_b; michael@0: michael@0: #if defined(DEBUG_OPAQUE_DITHER) michael@0: // calculate 8 elements worth into a temp buffer michael@0: { michael@0: int my_y = y; michael@0: int my_x = x; michael@0: SkPMColor* my_src = (SkPMColor*)src; michael@0: uint16_t* my_dst = dst; michael@0: int i; michael@0: michael@0: DITHER_565_SCAN(my_y); michael@0: for(i = 0; i < UNROLL; i++) { michael@0: SkPMColor c = *my_src++; michael@0: SkPMColorAssert(c); michael@0: if (c) { michael@0: unsigned a = SkGetPackedA32(c); michael@0: michael@0: int d = SkAlphaMul(DITHER_VALUE(my_x), SkAlpha255To256(a)); michael@0: tdv[i] = DITHER_VALUE(my_x); michael@0: ta[i] = a; michael@0: tap[i] = SkAlpha255To256(a); michael@0: td[i] = d; michael@0: michael@0: unsigned sr = SkGetPackedR32(c); michael@0: unsigned sg = SkGetPackedG32(c); michael@0: unsigned sb = SkGetPackedB32(c); michael@0: sr = SkDITHER_R32_FOR_565(sr, d); michael@0: sg = SkDITHER_G32_FOR_565(sg, d); michael@0: sb = SkDITHER_B32_FOR_565(sb, d); michael@0: michael@0: uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2); michael@0: uint32_t dst_expanded = SkExpand_rgb_16(*my_dst); michael@0: dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3); michael@0: // now src and dst expanded are in g:11 r:10 x:1 b:10 michael@0: tmpbuf[i] = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5); michael@0: td[i] = d; michael@0: } else { michael@0: tmpbuf[i] = *my_dst; michael@0: ta[i] = tdv[i] = td[i] = 0xbeef; michael@0: } michael@0: in_dst[i] = *my_dst; michael@0: my_dst += 1; michael@0: DITHER_INC_X(my_x); michael@0: } michael@0: } michael@0: #endif michael@0: michael@0: michael@0: { michael@0: register uint8x8_t d0 asm("d0"); michael@0: register uint8x8_t d1 asm("d1"); michael@0: register uint8x8_t d2 asm("d2"); michael@0: register uint8x8_t d3 asm("d3"); michael@0: michael@0: asm ("vld4.8 {d0-d3},[%[src]]! /* r=%P0 g=%P1 b=%P2 a=%P3 */" michael@0: : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+r" (src) michael@0: : michael@0: ); michael@0: #if SK_PMCOLOR_BYTE_ORDER(B,G,R,A) michael@0: sr = d2; sg = d1; sb = d0; sa = d3; michael@0: #elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A) michael@0: sr = d0; sg = d1; sb = d2; sa = d3; michael@0: #endif michael@0: } michael@0: michael@0: /* calculate 'd', which will be 0..7 michael@0: * dbase[] is 0..7; alpha is 0..256; 16 bits suffice michael@0: */ michael@0: alpha8 = vmovl_u8(dbase); michael@0: alpha8 = vmlal_u8(alpha8, sa, dbase); michael@0: d = vshrn_n_u16(alpha8, 8); // narrowing too michael@0: michael@0: // sr = sr - (sr>>5) + d michael@0: /* watching for 8-bit overflow. d is 0..7; risky range of michael@0: * sr is >248; and then (sr>>5) is 7 so it offsets 'd'; michael@0: * safe as long as we do ((sr-sr>>5) + d) michael@0: */ michael@0: sr = vsub_u8(sr, vshr_n_u8(sr, 5)); michael@0: sr = vadd_u8(sr, d); michael@0: michael@0: // sb = sb - (sb>>5) + d michael@0: sb = vsub_u8(sb, vshr_n_u8(sb, 5)); michael@0: sb = vadd_u8(sb, d); michael@0: michael@0: // sg = sg - (sg>>6) + d>>1; similar logic for overflows michael@0: sg = vsub_u8(sg, vshr_n_u8(sg, 6)); michael@0: sg = vadd_u8(sg, vshr_n_u8(d,1)); michael@0: michael@0: // need to pick up 8 dst's -- at 16 bits each, 128 bits michael@0: dst8 = vld1q_u16(dst); michael@0: dst_b = vandq_u16(dst8, vdupq_n_u16(SK_B16_MASK)); michael@0: dst_g = vshrq_n_u16(vshlq_n_u16(dst8, SK_R16_BITS), SK_R16_BITS + SK_B16_BITS); michael@0: dst_r = vshrq_n_u16(dst8, SK_R16_SHIFT); // clearing hi bits michael@0: michael@0: // blend michael@0: scale8 = vsubw_u8(vdupq_n_u16(256), sa); michael@0: michael@0: // combine the addq and mul, save 3 insns michael@0: scale8 = vshrq_n_u16(scale8, 3); michael@0: dst_b = vmlaq_u16(vshll_n_u8(sb,2), dst_b, scale8); michael@0: dst_g = vmlaq_u16(vshll_n_u8(sg,3), dst_g, scale8); michael@0: dst_r = vmlaq_u16(vshll_n_u8(sr,2), dst_r, scale8); michael@0: michael@0: // repack to store michael@0: dst8 = vshrq_n_u16(dst_b, 5); michael@0: dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dst_g, 5), 5); michael@0: dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dst_r,5), 11); michael@0: michael@0: vst1q_u16(dst, dst8); michael@0: michael@0: #if defined(DEBUG_OPAQUE_DITHER) michael@0: // verify my 8 elements match the temp buffer michael@0: { michael@0: int i, bad=0; michael@0: static int invocation; michael@0: michael@0: for (i = 0; i < UNROLL; i++) { michael@0: if (tmpbuf[i] != dst[i]) { michael@0: bad=1; michael@0: } michael@0: } michael@0: if (bad) { michael@0: SkDebugf("BAD S32A_D565_Opaque_Dither_neon(); invocation %d offset %d\n", michael@0: invocation, offset); michael@0: SkDebugf(" alpha 0x%x\n", alpha); michael@0: for (i = 0; i < UNROLL; i++) michael@0: SkDebugf("%2d: %s %04x w %04x id %04x s %08x d %04x %04x %04x %04x\n", michael@0: i, ((tmpbuf[i] != dst[i])?"BAD":"got"), dst[i], tmpbuf[i], michael@0: in_dst[i], src[i-8], td[i], tdv[i], tap[i], ta[i]); michael@0: michael@0: showme16("alpha8", &alpha8, sizeof(alpha8)); michael@0: showme16("scale8", &scale8, sizeof(scale8)); michael@0: showme8("d", &d, sizeof(d)); michael@0: showme16("dst8", &dst8, sizeof(dst8)); michael@0: showme16("dst_b", &dst_b, sizeof(dst_b)); michael@0: showme16("dst_g", &dst_g, sizeof(dst_g)); michael@0: showme16("dst_r", &dst_r, sizeof(dst_r)); michael@0: showme8("sb", &sb, sizeof(sb)); michael@0: showme8("sg", &sg, sizeof(sg)); michael@0: showme8("sr", &sr, sizeof(sr)); michael@0: michael@0: return; michael@0: } michael@0: offset += UNROLL; michael@0: invocation++; michael@0: } michael@0: #endif michael@0: dst += UNROLL; michael@0: count -= UNROLL; michael@0: // skip x += UNROLL, since it's unchanged mod-4 michael@0: } while (count >= UNROLL); michael@0: } michael@0: #undef UNROLL michael@0: michael@0: // residuals michael@0: if (count > 0) { michael@0: DITHER_565_SCAN(y); michael@0: do { michael@0: SkPMColor c = *src++; michael@0: SkPMColorAssert(c); michael@0: if (c) { michael@0: unsigned a = SkGetPackedA32(c); michael@0: michael@0: // dither and alpha are just temporary variables to work-around michael@0: // an ICE in debug. michael@0: unsigned dither = DITHER_VALUE(x); michael@0: unsigned alpha = SkAlpha255To256(a); michael@0: int d = SkAlphaMul(dither, alpha); michael@0: michael@0: unsigned sr = SkGetPackedR32(c); michael@0: unsigned sg = SkGetPackedG32(c); michael@0: unsigned sb = SkGetPackedB32(c); michael@0: sr = SkDITHER_R32_FOR_565(sr, d); michael@0: sg = SkDITHER_G32_FOR_565(sg, d); michael@0: sb = SkDITHER_B32_FOR_565(sb, d); michael@0: michael@0: uint32_t src_expanded = (sg << 24) | (sr << 13) | (sb << 2); michael@0: uint32_t dst_expanded = SkExpand_rgb_16(*dst); michael@0: dst_expanded = dst_expanded * (SkAlpha255To256(255 - a) >> 3); michael@0: // now src and dst expanded are in g:11 r:10 x:1 b:10 michael@0: *dst = SkCompact_rgb_16((src_expanded + dst_expanded) >> 5); michael@0: } michael@0: dst += 1; michael@0: DITHER_INC_X(x); michael@0: } while (--count != 0); michael@0: } michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: #undef DEBUG_S32_OPAQUE_DITHER michael@0: michael@0: void S32_D565_Opaque_Dither_neon(uint16_t* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha, int x, int y) { michael@0: SkASSERT(255 == alpha); michael@0: michael@0: #define UNROLL 8 michael@0: if (count >= UNROLL) { michael@0: uint8x8_t d; michael@0: const uint8_t *dstart = &gDitherMatrix_Neon[(y&3)*12 + (x&3)]; michael@0: d = vld1_u8(dstart); michael@0: michael@0: while (count >= UNROLL) { michael@0: uint8x8_t sr, sg, sb; michael@0: uint16x8_t dr, dg, db; michael@0: uint16x8_t dst8; michael@0: michael@0: { michael@0: register uint8x8_t d0 asm("d0"); michael@0: register uint8x8_t d1 asm("d1"); michael@0: register uint8x8_t d2 asm("d2"); michael@0: register uint8x8_t d3 asm("d3"); michael@0: michael@0: asm ( michael@0: "vld4.8 {d0-d3},[%[src]]! /* r=%P0 g=%P1 b=%P2 a=%P3 */" michael@0: : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3), [src] "+&r" (src) michael@0: : michael@0: ); michael@0: sg = d1; michael@0: #if SK_PMCOLOR_BYTE_ORDER(B,G,R,A) michael@0: sr = d2; sb = d0; michael@0: #elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A) michael@0: sr = d0; sb = d2; michael@0: #endif michael@0: } michael@0: /* XXX: if we want to prefetch, hide it in the above asm() michael@0: * using the gcc __builtin_prefetch(), the prefetch will michael@0: * fall to the bottom of the loop -- it won't stick up michael@0: * at the top of the loop, just after the vld4. michael@0: */ michael@0: michael@0: // sr = sr - (sr>>5) + d michael@0: sr = vsub_u8(sr, vshr_n_u8(sr, 5)); michael@0: dr = vaddl_u8(sr, d); michael@0: michael@0: // sb = sb - (sb>>5) + d michael@0: sb = vsub_u8(sb, vshr_n_u8(sb, 5)); michael@0: db = vaddl_u8(sb, d); michael@0: michael@0: // sg = sg - (sg>>6) + d>>1; similar logic for overflows michael@0: sg = vsub_u8(sg, vshr_n_u8(sg, 6)); michael@0: dg = vaddl_u8(sg, vshr_n_u8(d, 1)); michael@0: michael@0: // pack high bits of each into 565 format (rgb, b is lsb) michael@0: dst8 = vshrq_n_u16(db, 3); michael@0: dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dg, 2), 5); michael@0: dst8 = vsliq_n_u16(dst8, vshrq_n_u16(dr, 3), 11); michael@0: michael@0: // store it michael@0: vst1q_u16(dst, dst8); michael@0: michael@0: #if defined(DEBUG_S32_OPAQUE_DITHER) michael@0: // always good to know if we generated good results michael@0: { michael@0: int i, myx = x, myy = y; michael@0: DITHER_565_SCAN(myy); michael@0: for (i=0;i 0) { michael@0: DITHER_565_SCAN(y); michael@0: do { michael@0: SkPMColor c = *src++; michael@0: SkPMColorAssert(c); michael@0: SkASSERT(SkGetPackedA32(c) == 255); michael@0: michael@0: unsigned dither = DITHER_VALUE(x); michael@0: *dst++ = SkDitherRGB32To565(c, dither); michael@0: DITHER_INC_X(x); michael@0: } while (--count != 0); michael@0: } michael@0: } michael@0: michael@0: void Color32_arm_neon(SkPMColor* dst, const SkPMColor* src, int count, michael@0: SkPMColor color) { michael@0: if (count <= 0) { michael@0: return; michael@0: } michael@0: michael@0: if (0 == color) { michael@0: if (src != dst) { michael@0: memcpy(dst, src, count * sizeof(SkPMColor)); michael@0: } michael@0: return; michael@0: } michael@0: michael@0: unsigned colorA = SkGetPackedA32(color); michael@0: if (255 == colorA) { michael@0: sk_memset32(dst, color, count); michael@0: } else { michael@0: unsigned scale = 256 - SkAlpha255To256(colorA); michael@0: michael@0: if (count >= 8) { michael@0: // at the end of this assembly, count will have been decremented michael@0: // to a negative value. That is, if count mod 8 = x, it will be michael@0: // -8 +x coming out. michael@0: asm volatile ( michael@0: PLD128(src, 0) michael@0: michael@0: "vdup.32 q0, %[color] \n\t" michael@0: michael@0: PLD128(src, 128) michael@0: michael@0: // scale numerical interval [0-255], so load as 8 bits michael@0: "vdup.8 d2, %[scale] \n\t" michael@0: michael@0: PLD128(src, 256) michael@0: michael@0: "subs %[count], %[count], #8 \n\t" michael@0: michael@0: PLD128(src, 384) michael@0: michael@0: "Loop_Color32: \n\t" michael@0: michael@0: // load src color, 8 pixels, 4 64 bit registers michael@0: // (and increment src). michael@0: "vld1.32 {d4-d7}, [%[src]]! \n\t" michael@0: michael@0: PLD128(src, 384) michael@0: michael@0: // multiply long by scale, 64 bits at a time, michael@0: // destination into a 128 bit register. michael@0: "vmull.u8 q4, d4, d2 \n\t" michael@0: "vmull.u8 q5, d5, d2 \n\t" michael@0: "vmull.u8 q6, d6, d2 \n\t" michael@0: "vmull.u8 q7, d7, d2 \n\t" michael@0: michael@0: // shift the 128 bit registers, containing the 16 michael@0: // bit scaled values back to 8 bits, narrowing the michael@0: // results to 64 bit registers. michael@0: "vshrn.i16 d8, q4, #8 \n\t" michael@0: "vshrn.i16 d9, q5, #8 \n\t" michael@0: "vshrn.i16 d10, q6, #8 \n\t" michael@0: "vshrn.i16 d11, q7, #8 \n\t" michael@0: michael@0: // adding back the color, using 128 bit registers. michael@0: "vadd.i8 q6, q4, q0 \n\t" michael@0: "vadd.i8 q7, q5, q0 \n\t" michael@0: michael@0: // store back the 8 calculated pixels (2 128 bit michael@0: // registers), and increment dst. michael@0: "vst1.32 {d12-d15}, [%[dst]]! \n\t" michael@0: michael@0: "subs %[count], %[count], #8 \n\t" michael@0: "bge Loop_Color32 \n\t" michael@0: : [src] "+r" (src), [dst] "+r" (dst), [count] "+r" (count) michael@0: : [color] "r" (color), [scale] "r" (scale) michael@0: : "cc", "memory", michael@0: "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", michael@0: "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15" michael@0: ); michael@0: // At this point, if we went through the inline assembly, count is michael@0: // a negative value: michael@0: // if the value is -8, there is no pixel left to process. michael@0: // if the value is -7, there is one pixel left to process michael@0: // ... michael@0: // And'ing it with 7 will give us the number of pixels michael@0: // left to process. michael@0: count = count & 0x7; michael@0: } michael@0: michael@0: while (count > 0) { michael@0: *dst = color + SkAlphaMulQ(*src, scale); michael@0: src += 1; michael@0: dst += 1; michael@0: count--; michael@0: } michael@0: } michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm_neon[] = { michael@0: // no dither michael@0: // NOTE: For the S32_D565_Blend function below, we don't have a special michael@0: // version that assumes that each source pixel is opaque. But our michael@0: // S32A is still faster than the default, so use it. michael@0: S32_D565_Opaque_neon, michael@0: S32A_D565_Blend_neon, // really S32_D565_Blend michael@0: S32A_D565_Opaque_neon, michael@0: S32A_D565_Blend_neon, michael@0: michael@0: // dither michael@0: S32_D565_Opaque_Dither_neon, michael@0: S32_D565_Blend_Dither_neon, michael@0: S32A_D565_Opaque_Dither_neon, michael@0: NULL, // S32A_D565_Blend_Dither michael@0: }; michael@0: michael@0: const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm_neon[] = { michael@0: NULL, // S32_Opaque, michael@0: S32_Blend_BlitRow32_neon, // S32_Blend, michael@0: /* michael@0: * We have two choices for S32A_Opaque procs. The one reads the src alpha michael@0: * value and attempts to optimize accordingly. The optimization is michael@0: * sensitive to the source content and is not a win in all cases. For michael@0: * example, if there are a lot of transitions between the alpha states, michael@0: * the performance will almost certainly be worse. However, for many michael@0: * common cases the performance is equivalent or better than the standard michael@0: * case where we do not inspect the src alpha. michael@0: */ michael@0: #if SK_A32_SHIFT == 24 michael@0: // This proc assumes the alpha value occupies bits 24-32 of each SkPMColor michael@0: S32A_Opaque_BlitRow32_neon_src_alpha, // S32A_Opaque, michael@0: #else michael@0: S32A_Opaque_BlitRow32_neon, // S32A_Opaque, michael@0: #endif michael@0: S32A_Blend_BlitRow32_neon // S32A_Blend michael@0: };