michael@0: /* michael@0: * Copyright 2012 The Android Open Source Project michael@0: * michael@0: * Use of this source code is governed by a BSD-style license that can be michael@0: * found in the LICENSE file. michael@0: */ michael@0: michael@0: #include "SkBlitRow.h" michael@0: #include "SkColorPriv.h" michael@0: #include "SkDither.h" michael@0: #include "SkMathPriv.h" michael@0: #include "SkUtils.h" michael@0: #include "SkUtilsArm.h" michael@0: michael@0: #include "SkCachePreload_arm.h" michael@0: michael@0: // Define USE_NEON_CODE to indicate that we need to build NEON routines michael@0: #define USE_NEON_CODE (!SK_ARM_NEON_IS_NONE) michael@0: michael@0: // Define USE_ARM_CODE to indicate that we need to build ARM routines michael@0: #define USE_ARM_CODE (!SK_ARM_NEON_IS_ALWAYS) michael@0: michael@0: #if USE_NEON_CODE michael@0: #include "SkBlitRow_opts_arm_neon.h" michael@0: #endif michael@0: michael@0: #if USE_ARM_CODE michael@0: michael@0: static void S32A_D565_Opaque(uint16_t* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, int count, michael@0: U8CPU alpha, int /*x*/, int /*y*/) { michael@0: SkASSERT(255 == alpha); michael@0: michael@0: asm volatile ( michael@0: "1: \n\t" michael@0: "ldr r3, [%[src]], #4 \n\t" michael@0: "cmp r3, #0xff000000 \n\t" michael@0: "blo 2f \n\t" michael@0: "and r4, r3, #0x0000f8 \n\t" michael@0: "and r5, r3, #0x00fc00 \n\t" michael@0: "and r6, r3, #0xf80000 \n\t" michael@0: #ifdef SK_ARM_HAS_EDSP michael@0: "pld [r1, #32] \n\t" michael@0: #endif michael@0: "lsl r3, r4, #8 \n\t" michael@0: "orr r3, r3, r5, lsr #5 \n\t" michael@0: "orr r3, r3, r6, lsr #19 \n\t" michael@0: "subs %[count], %[count], #1 \n\t" michael@0: "strh r3, [%[dst]], #2 \n\t" michael@0: "bne 1b \n\t" michael@0: "b 4f \n\t" michael@0: "2: \n\t" michael@0: "lsrs r7, r3, #24 \n\t" michael@0: "beq 3f \n\t" michael@0: "ldrh r4, [%[dst]] \n\t" michael@0: "rsb r7, r7, #255 \n\t" michael@0: "and r6, r4, #0x001f \n\t" michael@0: #if SK_ARM_ARCH <= 6 michael@0: "lsl r5, r4, #21 \n\t" michael@0: "lsr r5, r5, #26 \n\t" michael@0: #else michael@0: "ubfx r5, r4, #5, #6 \n\t" michael@0: #endif michael@0: #ifdef SK_ARM_HAS_EDSP michael@0: "pld [r0, #16] \n\t" michael@0: #endif michael@0: "lsr r4, r4, #11 \n\t" michael@0: #ifdef SK_ARM_HAS_EDSP michael@0: "smulbb r6, r6, r7 \n\t" michael@0: "smulbb r5, r5, r7 \n\t" michael@0: "smulbb r4, r4, r7 \n\t" michael@0: #else michael@0: "mul r6, r6, r7 \n\t" michael@0: "mul r5, r5, r7 \n\t" michael@0: "mul r4, r4, r7 \n\t" michael@0: #endif michael@0: #if SK_ARM_ARCH >= 6 michael@0: "uxtb r7, r3, ROR #16 \n\t" michael@0: "uxtb ip, r3, ROR #8 \n\t" michael@0: #else michael@0: "mov ip, #0xff \n\t" michael@0: "and r7, ip, r3, ROR #16 \n\t" michael@0: "and ip, ip, r3, ROR #8 \n\t" michael@0: #endif michael@0: "and r3, r3, #0xff \n\t" michael@0: "add r6, r6, #16 \n\t" michael@0: "add r5, r5, #32 \n\t" michael@0: "add r4, r4, #16 \n\t" michael@0: "add r6, r6, r6, lsr #5 \n\t" michael@0: "add r5, r5, r5, lsr #6 \n\t" michael@0: "add r4, r4, r4, lsr #5 \n\t" michael@0: "add r6, r7, r6, lsr #5 \n\t" michael@0: "add r5, ip, r5, lsr #6 \n\t" michael@0: "add r4, r3, r4, lsr #5 \n\t" michael@0: "lsr r6, r6, #3 \n\t" michael@0: "and r5, r5, #0xfc \n\t" michael@0: "and r4, r4, #0xf8 \n\t" michael@0: "orr r6, r6, r5, lsl #3 \n\t" michael@0: "orr r4, r6, r4, lsl #8 \n\t" michael@0: "strh r4, [%[dst]], #2 \n\t" michael@0: #ifdef SK_ARM_HAS_EDSP michael@0: "pld [r1, #32] \n\t" michael@0: #endif michael@0: "subs %[count], %[count], #1 \n\t" michael@0: "bne 1b \n\t" michael@0: "b 4f \n\t" michael@0: "3: \n\t" michael@0: "subs %[count], %[count], #1 \n\t" michael@0: "add %[dst], %[dst], #2 \n\t" michael@0: "bne 1b \n\t" michael@0: "4: \n\t" michael@0: : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count) michael@0: : michael@0: : "memory", "cc", "r3", "r4", "r5", "r6", "r7", "ip" michael@0: ); michael@0: } michael@0: michael@0: static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha) { michael@0: michael@0: SkASSERT(255 == alpha); michael@0: michael@0: asm volatile ( michael@0: "cmp %[count], #0 \n\t" /* comparing count with 0 */ michael@0: "beq 3f \n\t" /* if zero exit */ michael@0: michael@0: "mov ip, #0xff \n\t" /* load the 0xff mask in ip */ michael@0: "orr ip, ip, ip, lsl #16 \n\t" /* convert it to 0xff00ff in ip */ michael@0: michael@0: "cmp %[count], #2 \n\t" /* compare count with 2 */ michael@0: "blt 2f \n\t" /* if less than 2 -> single loop */ michael@0: michael@0: /* Double Loop */ michael@0: "1: \n\t" /* */ michael@0: "ldm %[src]!, {r5,r6} \n\t" /* load the src(s) at r5-r6 */ michael@0: "ldm %[dst], {r7,r8} \n\t" /* loading dst(s) into r7-r8 */ michael@0: "lsr r4, r5, #24 \n\t" /* extracting the alpha from source and storing it to r4 */ michael@0: michael@0: /* ----------- */ michael@0: "and r9, ip, r7 \n\t" /* r9 = br masked by ip */ michael@0: "rsb r4, r4, #256 \n\t" /* subtracting the alpha from 256 -> r4=scale */ michael@0: "and r10, ip, r7, lsr #8 \n\t" /* r10 = ag masked by ip */ michael@0: michael@0: "mul r9, r9, r4 \n\t" /* br = br * scale */ michael@0: "mul r10, r10, r4 \n\t" /* ag = ag * scale */ michael@0: "and r9, ip, r9, lsr #8 \n\t" /* lsr br by 8 and mask it */ michael@0: michael@0: "and r10, r10, ip, lsl #8 \n\t" /* mask ag with reverse mask */ michael@0: "lsr r4, r6, #24 \n\t" /* extracting the alpha from source and storing it to r4 */ michael@0: "orr r7, r9, r10 \n\t" /* br | ag*/ michael@0: michael@0: "add r7, r5, r7 \n\t" /* dst = src + calc dest(r7) */ michael@0: "rsb r4, r4, #256 \n\t" /* subtracting the alpha from 255 -> r4=scale */ michael@0: michael@0: /* ----------- */ michael@0: "and r9, ip, r8 \n\t" /* r9 = br masked by ip */ michael@0: michael@0: "and r10, ip, r8, lsr #8 \n\t" /* r10 = ag masked by ip */ michael@0: "mul r9, r9, r4 \n\t" /* br = br * scale */ michael@0: "sub %[count], %[count], #2 \n\t" michael@0: "mul r10, r10, r4 \n\t" /* ag = ag * scale */ michael@0: michael@0: "and r9, ip, r9, lsr #8 \n\t" /* lsr br by 8 and mask it */ michael@0: "and r10, r10, ip, lsl #8 \n\t" /* mask ag with reverse mask */ michael@0: "cmp %[count], #1 \n\t" /* comparing count with 1 */ michael@0: "orr r8, r9, r10 \n\t" /* br | ag */ michael@0: michael@0: "add r8, r6, r8 \n\t" /* dst = src + calc dest(r8) */ michael@0: michael@0: /* ----------------- */ michael@0: "stm %[dst]!, {r7,r8} \n\t" /* *dst = r7, increment dst by two (each times 4) */ michael@0: /* ----------------- */ michael@0: michael@0: "bgt 1b \n\t" /* if greater than 1 -> reloop */ michael@0: "blt 3f \n\t" /* if less than 1 -> exit */ michael@0: michael@0: /* Single Loop */ michael@0: "2: \n\t" /* */ michael@0: "ldr r5, [%[src]], #4 \n\t" /* load the src pointer into r5 r5=src */ michael@0: "ldr r7, [%[dst]] \n\t" /* loading dst into r7 */ michael@0: "lsr r4, r5, #24 \n\t" /* extracting the alpha from source and storing it to r4 */ michael@0: michael@0: /* ----------- */ michael@0: "and r9, ip, r7 \n\t" /* r9 = br masked by ip */ michael@0: "rsb r4, r4, #256 \n\t" /* subtracting the alpha from 256 -> r4=scale */ michael@0: michael@0: "and r10, ip, r7, lsr #8 \n\t" /* r10 = ag masked by ip */ michael@0: "mul r9, r9, r4 \n\t" /* br = br * scale */ michael@0: "mul r10, r10, r4 \n\t" /* ag = ag * scale */ michael@0: "and r9, ip, r9, lsr #8 \n\t" /* lsr br by 8 and mask it */ michael@0: michael@0: "and r10, r10, ip, lsl #8 \n\t" /* mask ag */ michael@0: "orr r7, r9, r10 \n\t" /* br | ag */ michael@0: michael@0: "add r7, r5, r7 \n\t" /* *dst = src + calc dest(r7) */ michael@0: michael@0: /* ----------------- */ michael@0: "str r7, [%[dst]], #4 \n\t" /* *dst = r7, increment dst by one (times 4) */ michael@0: /* ----------------- */ michael@0: michael@0: "3: \n\t" /* */ michael@0: : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count) michael@0: : michael@0: : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "ip", "memory" michael@0: ); michael@0: } michael@0: michael@0: /* michael@0: * ARM asm version of S32A_Blend_BlitRow32 michael@0: */ michael@0: void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst, michael@0: const SkPMColor* SK_RESTRICT src, michael@0: int count, U8CPU alpha) { michael@0: asm volatile ( michael@0: "cmp %[count], #0 \n\t" /* comparing count with 0 */ michael@0: "beq 3f \n\t" /* if zero exit */ michael@0: michael@0: "mov r12, #0xff \n\t" /* load the 0xff mask in r12 */ michael@0: "orr r12, r12, r12, lsl #16 \n\t" /* convert it to 0xff00ff in r12 */ michael@0: michael@0: /* src1,2_scale */ michael@0: "add %[alpha], %[alpha], #1 \n\t" /* loading %[alpha]=src_scale=alpha+1 */ michael@0: michael@0: "cmp %[count], #2 \n\t" /* comparing count with 2 */ michael@0: "blt 2f \n\t" /* if less than 2 -> single loop */ michael@0: michael@0: /* Double Loop */ michael@0: "1: \n\t" /* */ michael@0: "ldm %[src]!, {r5, r6} \n\t" /* loading src pointers into r5 and r6 */ michael@0: "ldm %[dst], {r7, r8} \n\t" /* loading dst pointers into r7 and r8 */ michael@0: michael@0: /* dst1_scale and dst2_scale*/ michael@0: "lsr r9, r5, #24 \n\t" /* src >> 24 */ michael@0: "lsr r10, r6, #24 \n\t" /* src >> 24 */ michael@0: #ifdef SK_ARM_HAS_EDSP michael@0: "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */ michael@0: "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */ michael@0: #else michael@0: "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */ michael@0: "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */ michael@0: #endif michael@0: "lsr r9, r9, #8 \n\t" /* r9 >> 8 */ michael@0: "lsr r10, r10, #8 \n\t" /* r10 >> 8 */ michael@0: "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */ michael@0: "rsb r10, r10, #256 \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */ michael@0: michael@0: /* ---------------------- */ michael@0: michael@0: /* src1, src1_scale */ michael@0: "and r11, r12, r5, lsr #8 \n\t" /* ag = r11 = r5 masked by r12 lsr by #8 */ michael@0: "and r4, r12, r5 \n\t" /* rb = r4 = r5 masked by r12 */ michael@0: "mul r11, r11, %[alpha] \n\t" /* ag = r11 times src_scale */ michael@0: "mul r4, r4, %[alpha] \n\t" /* rb = r4 times src_scale */ michael@0: "and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */ michael@0: "and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */ michael@0: "orr r5, r11, r4 \n\t" /* r5 = (src1, src_scale) */ michael@0: michael@0: /* dst1, dst1_scale */ michael@0: "and r11, r12, r7, lsr #8 \n\t" /* ag = r11 = r7 masked by r12 lsr by #8 */ michael@0: "and r4, r12, r7 \n\t" /* rb = r4 = r7 masked by r12 */ michael@0: "mul r11, r11, r9 \n\t" /* ag = r11 times dst_scale (r9) */ michael@0: "mul r4, r4, r9 \n\t" /* rb = r4 times dst_scale (r9) */ michael@0: "and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */ michael@0: "and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */ michael@0: "orr r9, r11, r4 \n\t" /* r9 = (dst1, dst_scale) */ michael@0: michael@0: /* ---------------------- */ michael@0: "add r9, r5, r9 \n\t" /* *dst = src plus dst both scaled */ michael@0: /* ---------------------- */ michael@0: michael@0: /* ====================== */ michael@0: michael@0: /* src2, src2_scale */ michael@0: "and r11, r12, r6, lsr #8 \n\t" /* ag = r11 = r6 masked by r12 lsr by #8 */ michael@0: "and r4, r12, r6 \n\t" /* rb = r4 = r6 masked by r12 */ michael@0: "mul r11, r11, %[alpha] \n\t" /* ag = r11 times src_scale */ michael@0: "mul r4, r4, %[alpha] \n\t" /* rb = r4 times src_scale */ michael@0: "and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */ michael@0: "and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */ michael@0: "orr r6, r11, r4 \n\t" /* r6 = (src2, src_scale) */ michael@0: michael@0: /* dst2, dst2_scale */ michael@0: "and r11, r12, r8, lsr #8 \n\t" /* ag = r11 = r8 masked by r12 lsr by #8 */ michael@0: "and r4, r12, r8 \n\t" /* rb = r4 = r8 masked by r12 */ michael@0: "mul r11, r11, r10 \n\t" /* ag = r11 times dst_scale (r10) */ michael@0: "mul r4, r4, r10 \n\t" /* rb = r4 times dst_scale (r6) */ michael@0: "and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */ michael@0: "and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */ michael@0: "orr r10, r11, r4 \n\t" /* r10 = (dst2, dst_scale) */ michael@0: michael@0: "sub %[count], %[count], #2 \n\t" /* decrease count by 2 */ michael@0: /* ---------------------- */ michael@0: "add r10, r6, r10 \n\t" /* *dst = src plus dst both scaled */ michael@0: /* ---------------------- */ michael@0: "cmp %[count], #1 \n\t" /* compare count with 1 */ michael@0: /* ----------------- */ michael@0: "stm %[dst]!, {r9, r10} \n\t" /* copy r9 and r10 to r7 and r8 respectively */ michael@0: /* ----------------- */ michael@0: michael@0: "bgt 1b \n\t" /* if %[count] greater than 1 reloop */ michael@0: "blt 3f \n\t" /* if %[count] less than 1 exit */ michael@0: /* else get into the single loop */ michael@0: /* Single Loop */ michael@0: "2: \n\t" /* */ michael@0: "ldr r5, [%[src]], #4 \n\t" /* loading src pointer into r5: r5=src */ michael@0: "ldr r7, [%[dst]] \n\t" /* loading dst pointer into r7: r7=dst */ michael@0: michael@0: "lsr r6, r5, #24 \n\t" /* src >> 24 */ michael@0: "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */ michael@0: #ifdef SK_ARM_HAS_EDSP michael@0: "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */ michael@0: #else michael@0: "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */ michael@0: #endif michael@0: "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */ michael@0: "lsr r6, r6, #8 \n\t" /* r6 >> 8 */ michael@0: "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */ michael@0: "rsb r6, r6, #256 \n\t" /* r6 = 255 - r6 + 1 */ michael@0: michael@0: /* src, src_scale */ michael@0: "mul r9, r9, %[alpha] \n\t" /* rb = r9 times scale */ michael@0: "and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */ michael@0: "and r9, r12, r9, lsr #8 \n\t" /* rb masked by mask (r12) */ michael@0: "orr r10, r8, r9 \n\t" /* r10 = (scr, src_scale) */ michael@0: michael@0: /* dst, dst_scale */ michael@0: "and r8, r12, r7, lsr #8 \n\t" /* ag = r8 = r7 masked by r12 lsr by #8 */ michael@0: "and r9, r12, r7 \n\t" /* rb = r9 = r7 masked by r12 */ michael@0: "mul r8, r8, r6 \n\t" /* ag = r8 times scale (r6) */ michael@0: "mul r9, r9, r6 \n\t" /* rb = r9 times scale (r6) */ michael@0: "and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */ michael@0: "and r9, r12, r9, lsr #8 \n\t" /* rb masked by mask (r12) */ michael@0: "orr r7, r8, r9 \n\t" /* r7 = (dst, dst_scale) */ michael@0: michael@0: "add r10, r7, r10 \n\t" /* *dst = src plus dst both scaled */ michael@0: michael@0: /* ----------------- */ michael@0: "str r10, [%[dst]], #4 \n\t" /* *dst = r10, postincrement dst by one (times 4) */ michael@0: /* ----------------- */ michael@0: michael@0: "3: \n\t" /* */ michael@0: : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count), [alpha] "+r" (alpha) michael@0: : michael@0: : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "memory" michael@0: ); michael@0: michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: static const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm[] = { michael@0: // no dither michael@0: // NOTE: For the functions below, we don't have a special version michael@0: // that assumes that each source pixel is opaque. But our S32A is michael@0: // still faster than the default, so use it. michael@0: S32A_D565_Opaque, // S32_D565_Opaque michael@0: NULL, // S32_D565_Blend michael@0: S32A_D565_Opaque, // S32A_D565_Opaque michael@0: NULL, // S32A_D565_Blend michael@0: michael@0: // dither michael@0: NULL, // S32_D565_Opaque_Dither michael@0: NULL, // S32_D565_Blend_Dither michael@0: NULL, // S32A_D565_Opaque_Dither michael@0: NULL, // S32A_D565_Blend_Dither michael@0: }; michael@0: michael@0: static const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm[] = { michael@0: NULL, // S32_Opaque, michael@0: NULL, // S32_Blend, michael@0: S32A_Opaque_BlitRow32_arm, // S32A_Opaque, michael@0: S32A_Blend_BlitRow32_arm // S32A_Blend michael@0: }; michael@0: michael@0: #endif // USE_ARM_CODE michael@0: michael@0: SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) { michael@0: return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_procs_arm)[flags]; michael@0: } michael@0: michael@0: SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) { michael@0: return SK_ARM_NEON_WRAP(sk_blitrow_platform_32_procs_arm)[flags]; michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: #define Color32_arm NULL michael@0: SkBlitRow::ColorProc SkBlitRow::PlatformColorProc() { michael@0: return SK_ARM_NEON_WRAP(Color32_arm); michael@0: } michael@0: michael@0: SkBlitRow::ColorRectProc PlatformColorRectProcFactory() { michael@0: return NULL; michael@0: }