/*
 * Copyright © 2009 Nokia Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author:  Siarhei Siamashka (siarhei.siamashka@nokia.com)
 */

/*
 * This file contains implementations of NEON optimized pixel processing
 * functions. There is no full and detailed tutorial, but some functions
 * (those which are exposing some new or interesting features) are
 * extensively commented and can be used as examples.
 *
 * You may want to have a look at the comments for the following functions:
 * - pixman_composite_over_8888_0565_asm_neon
 * - pixman_composite_over_n_8_0565_asm_neon
 */

/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif

    .text
    .fpu neon
    .arch armv7a
    .object_arch armv4
    .eabi_attribute 10, 0 /* suppress Tag_FP_arch */
    .eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */
    .arm
    .altmacro
    .p2align 2

#include "pixman-private.h"
#include "pixman-arm-neon-asm.h"

/* Global configuration options and preferences */

/*
 * The code can optionally make use of unaligned memory accesses to improve
 * performance of handling leading/trailing pixels for each scanline.
 * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0, for
 * example on Linux, if unaligned memory accesses are not configured to
 * generate exceptions.
 */
.set RESPECT_STRICT_ALIGNMENT, 1

/*
 * Set default prefetch type.
 * There is a choice between the following options:
 *
 * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to
 * work as a NOP, to work around some HW bugs or for whatever other reason)
 *
 * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores
 * where advanced prefetch introduces heavy overhead)
 *
 * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
 * which can run ARM and NEON instructions simultaneously, so that extra ARM
 * instructions do not add (many) extra cycles, but improve prefetch
 * efficiency)
 *
 * Note: some types of function can't support advanced prefetch and fall
 * back to the simple one (those which handle 24bpp pixels).
 */
.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED

/* Prefetch distance in pixels for simple prefetch */
.set PREFETCH_DISTANCE_SIMPLE, 64

/*
 * Implementation of pixman_composite_over_8888_0565_asm_neon
 *
 * This function takes an a8r8g8b8 source buffer and an r5g6b5 destination
 * buffer and performs the OVER compositing operation. The function
 * fast_composite_over_8888_0565 from pixman-fast-path.c does the same in C
 * and can be used as a reference.
 *
 * First we need to have some NEON assembly code which can do the actual
 * operation on the pixels and provide it to the template macro.
 *
 * The template macro quite conveniently takes care of emitting all the
 * necessary code for memory reading and writing (including quite tricky
 * cases of handling unaligned leading/trailing pixels), so we only need
 * to deal with the data in NEON registers.
 *
 * The recommended NEON register allocation in general is the following:
 * d0, d1, d2, d3     - contain loaded source pixel data
 * d4, d5, d6, d7     - contain loaded destination pixels (if they are needed)
 * d24, d25, d26, d27 - contain loaded mask pixel data (if mask is used)
 * d28, d29, d30, d31 - place for storing the result (destination pixels)
 *
 * As can be seen above, four 64-bit NEON registers are used for keeping
 * intermediate pixel data and up to 8 pixels can be processed in one step
 * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
 *
 * This particular function uses the following register allocation:
 * d0, d1, d2, d3     - contain loaded source pixel data
 * d4, d5             - contain loaded destination pixels (they are needed)
 * d28, d29           - place for storing the result (destination pixels)
 */

/*
 * Step one. We need to have some code to do some arithmetic on pixel data.
 * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
 * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
 * perform all the needed calculations and write the result to {d28, d29}.
 * The rationale for having two macros and not just one will be explained
 * later. In practice, any single monolithic function which does the work
 * can be split into two parts in any arbitrary way without affecting
 * correctness.
 */
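/*
 * For reference, here is a rough C model of what the macros below compute
 * for a single pixel. This is only a sketch in the spirit of
 * fast_composite_over_8888_0565 from pixman-fast-path.c, not the exact
 * pixman code. 'mul_div_255' mirrors the NEON vmull/vrshr/vraddhn
 * sequence, which approximates x * y / 255 with rounding:
 *
 *   static inline uint8_t mul_div_255 (uint8_t x, uint8_t y)
 *   {
 *       uint16_t t = (uint16_t)x * y;
 *       return (t + ((t + 128) >> 8) + 128) >> 8;
 *   }
 *
 *   static inline uint8_t sat_add_u8 (uint8_t a, uint8_t b)
 *   {
 *       unsigned s = (unsigned)a + b;       // vqadd.u8 equivalent
 *       return s > 255 ? 255 : s;
 *   }
 *
 *   static uint16_t over_8888_0565_pixel (uint32_t src, uint16_t dst)
 *   {
 *       uint8_t ia = ~(uint8_t)(src >> 24);              // inverted src alpha
 *       uint8_t dr = (dst >> 8) & 0xf8;  dr |= dr >> 5;  // r5 -> r8
 *       uint8_t dg = (dst >> 3) & 0xfc;  dg |= dg >> 6;  // g6 -> g8
 *       uint8_t db = (dst << 3) & 0xf8;  db |= db >> 5;  // b5 -> b8
 *       uint8_t r = sat_add_u8 ((src >> 16) & 0xff, mul_div_255 (ia, dr));
 *       uint8_t g = sat_add_u8 ((src >> 8) & 0xff, mul_div_255 (ia, dg));
 *       uint8_t b = sat_add_u8 (src & 0xff, mul_div_255 (ia, db));
 *       return ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3);
 *   }
 */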
/*
 * There is one special trick here too. The common template macro can
 * optionally make our life a bit easier by doing R, G, B, A color
 * components deinterleaving for 32bpp pixel formats (and this feature is
 * used in the 'pixman_composite_over_8888_0565_asm_neon' function). So it
 * means that instead of having 8 packed pixels in {d0, d1, d2, d3}
 * registers, we actually use the d0 register for the blue channel (a
 * vector of eight 8-bit values), the d1 register for green, d2 for red
 * and d3 for alpha. This simple conversion can be also done with a few
 * NEON instructions:
 *
 * Packed to planar conversion:
 *  vuzp.8 d0, d1
 *  vuzp.8 d2, d3
 *  vuzp.8 d1, d3
 *  vuzp.8 d0, d2
 *
 * Planar to packed conversion:
 *  vzip.8 d0, d2
 *  vzip.8 d1, d3
 *  vzip.8 d2, d3
 *  vzip.8 d0, d1
 *
 * But pixels can be loaded directly in planar format using the VLD4.8 NEON
 * instruction. It is 1 cycle slower than VLD1.32, so this is not always
 * desirable; that's why deinterleaving is optional.
 *
 * But anyway, here is the code:
 */
.macro pixman_composite_over_8888_0565_process_pixblock_head
    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
       and put data into d6 - red, d7 - green, d30 - blue */
    vshrn.u16   d6, q2, #8
    vshrn.u16   d7, q2, #3
    vsli.u16    q2, q2, #5
    vsri.u8     d6, d6, #5
    vmvn.8      d3, d3      /* invert source alpha */
    vsri.u8     d7, d7, #6
    vshrn.u16   d30, q2, #2
    /* now do alpha blending, storing results in 8-bit planar format
       into d16 - red, d19 - green, d18 - blue */
    vmull.u8    q10, d3, d6
    vmull.u8    q11, d3, d7
    vmull.u8    q12, d3, d30
    vrshr.u16   q13, q10, #8
    vrshr.u16   q3, q11, #8
    vrshr.u16   q15, q12, #8
    vraddhn.u16 d20, q10, q13
    vraddhn.u16 d23, q11, q3
    vraddhn.u16 d22, q12, q15
.endm

.macro pixman_composite_over_8888_0565_process_pixblock_tail
    /* ... continue alpha blending */
    vqadd.u8    d16, d2, d20
    vqadd.u8    q9, q0, q11
    /* convert the result to r5g6b5 and store it into {d28, d29} */
    vshll.u8    q14, d16, #8
    vshll.u8    q8, d19, #8
    vshll.u8    q9, d18, #8
    vsri.u16    q14, q8, #5
    vsri.u16    q14, q9, #11
.endm

/*
 * OK, now we have almost everything that we need. Using the above two
 * macros, the work can be done right. But now we want to optimize it a
 * bit. ARM Cortex-A8 is an in-order core, and benefits a lot from good
 * code scheduling and software pipelining.
 *
 * Let's construct some code which will run in the core main loop. Some
 * pseudo-code of the main loop will look like this:
 *   head
 *   while (...) {
 *       tail
 *       head
 *   }
 *   tail
 *
 * It may look a bit weird, but this setup allows hiding instruction
 * latencies better and also utilizes the dual-issue capability more
 * efficiently (makes pairs of load-store and ALU instructions).
 */
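/*
 * In C-like terms the pipelined loop is just the following (a sketch,
 * with 'head'/'tail' standing for the macros above and 'block' for one
 * 8-pixel chunk):
 *
 *   head (block[0]);
 *   for (i = 1; i < nblocks; i++) {
 *       tail (block[i - 1]);    // finish the previous block
 *       head (block[i]);        // already start the next one
 *   }
 *   tail (block[nblocks - 1]);
 *
 * At every point one block is being finished while the next one is
 * already in flight, which is what hides the latencies of the long NEON
 * multiply/narrow chains.
 */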
/*
 * So what we need now is a '*_tail_head' macro, which will be used in the
 * core main loop. A trivial straightforward implementation of this macro
 * would look like this:
 *
 *   pixman_composite_over_8888_0565_process_pixblock_tail
 *   vst1.16     {d28, d29}, [DST_W, :128]!
 *   vld1.16     {d4, d5}, [DST_R, :128]!
 *   vld4.32     {d0, d1, d2, d3}, [SRC]!
 *   pixman_composite_over_8888_0565_process_pixblock_head
 *   cache_preload 8, 8
 *
 * Now it also contains some VLD/VST instructions. We simply can't move
 * from processing one block of pixels to the other one with just
 * arithmetic. The previously processed data needs to be written to memory
 * and new data needs to be fetched. Fortunately, this main loop does not
 * deal with partial leading/trailing pixels and can load/store a full
 * block of pixels in bulk. Additionally, the destination buffer is
 * already 16 bytes aligned here (which is good for performance).
 *
 * New things here are the DST_R, DST_W, SRC and MASK identifiers. These
 * are aliases for the ARM registers which are used as pointers for
 * accessing data. We maintain separate pointers for reading and writing
 * the destination buffer (DST_R and DST_W).
 *
 * Another new thing is the 'cache_preload' macro. It is used for
 * prefetching data into the CPU L2 cache and improves performance when
 * dealing with large images which are far larger than the cache size. It
 * uses one argument (actually two, but they need to be the same here) -
 * the number of pixels in a block. Looking into 'pixman-arm-neon-asm.h'
 * can provide some details about this macro. Moreover, if good
 * performance is needed, the code from this macro needs to be copied into
 * the '*_tail_head' macro and mixed with the rest of the code for optimal
 * instruction scheduling. We are actually doing it below.
 *
 * Now, after all the explanations, here is the optimized code. Different
 * instruction streams (originating from the '*_head', '*_tail' and
 * 'cache_preload' macros) use different indentation levels for better
 * readability. Actually taking the code from one of these indentation
 * levels and ignoring a few VLD/VST instructions would result in exactly
 * the code from the '*_head', '*_tail' or 'cache_preload' macro!
 */

#if 1

.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
        vqadd.u8    d16, d2, d20
    vld1.16     {d4, d5}, [DST_R, :128]!
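        /* note: each 'PF' line below belongs to the advanced-prefetch
           stream; the PF macro (see pixman-arm-neon-asm.h) emits its
           operands only when advanced prefetch is enabled and expands
           to nothing for the NONE/SIMPLE prefetch types */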
        vqadd.u8    q9, q0, q11
            vshrn.u16   d6, q2, #8
    fetch_src_pixblock
            vshrn.u16   d7, q2, #3
            vsli.u16    q2, q2, #5
        vshll.u8    q14, d16, #8
                        PF add PF_X, PF_X, #8
        vshll.u8    q8, d19, #8
                        PF tst PF_CTL, #0xF
            vsri.u8     d6, d6, #5
                        PF addne PF_X, PF_X, #8
            vmvn.8      d3, d3
                        PF subne PF_CTL, PF_CTL, #1
            vsri.u8     d7, d7, #6
            vshrn.u16   d30, q2, #2
            vmull.u8    q10, d3, d6
                        PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
            vmull.u8    q11, d3, d7
            vmull.u8    q12, d3, d30
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
        vsri.u16    q14, q8, #5
                        PF cmp PF_X, ORIG_W
        vshll.u8    q9, d18, #8
            vrshr.u16   q13, q10, #8
                        PF subge PF_X, PF_X, ORIG_W
            vrshr.u16   q3, q11, #8
            vrshr.u16   q15, q12, #8
                        PF subges PF_CTL, PF_CTL, #0x10
        vsri.u16    q14, q9, #11
                        PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
            vraddhn.u16 d20, q10, q13
            vraddhn.u16 d23, q11, q3
                        PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
            vraddhn.u16 d22, q12, q15
        vst1.16     {d28, d29}, [DST_W, :128]!
.endm

#else

/* If we did not care much about the performance, we would just use this... */
.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
    pixman_composite_over_8888_0565_process_pixblock_tail
    vst1.16     {d28, d29}, [DST_W, :128]!
    vld1.16     {d4, d5}, [DST_R, :128]!
    fetch_src_pixblock
    pixman_composite_over_8888_0565_process_pixblock_head
    cache_preload 8, 8
.endm

#endif

/*
 * And now the final part. We are using the 'generate_composite_function'
 * macro to put all the stuff together. We are specifying the name of the
 * function which we want to get, the number of bits per pixel for the
 * source, mask and destination (0 if unused, like the mask in this case).
 * Next come some bit flags:
 *   FLAG_DST_READWRITE      - tells that the destination buffer is both
 *                             read and written; for a write-only buffer we
 *                             would use the FLAG_DST_WRITEONLY flag instead
 *   FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
 *                             and separate color channels for the 32bpp
 *                             format
 * The next things are:
 *  - the number of pixels processed per iteration (8 in this case, because
 *    that's the maximum that can fit into four 64-bit NEON registers).
 *  - the prefetch distance, measured in pixel blocks. In this case it is
 *    5 times 8 pixels. That would be 40 pixels, or up to 160 bytes. The
 *    optimal prefetch distance can be selected by running some benchmarks.
 */
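/*
 * As a quick sanity check of those numbers (a sketch; the bpp values are
 * the ones used by this particular function):
 *
 *   prefetch_bytes = distance_blocks * pixels_per_block * bytes_per_pixel
 *                  = 5 * 8 * 4 = 160 bytes ahead in the a8r8g8b8 source
 *                  = 5 * 8 * 2 =  80 bytes ahead in the r5g6b5 destination
 */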
/*
 * After that we specify some macros. These are 'default_init' and
 * 'default_cleanup' here, which are empty (but it is possible to have
 * custom init/cleanup macros to be able to save/restore some extra NEON
 * registers like d8-d15 or do anything else), followed by
 * 'pixman_composite_over_8888_0565_process_pixblock_head',
 * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
 * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
 * which we implemented above.
 *
 * The last part is the NEON register allocation scheme.
 */
generate_composite_function \
    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_0565_process_pixblock_head, \
    pixman_composite_over_8888_0565_process_pixblock_tail, \
    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    0,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_over_n_0565_process_pixblock_head
    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
       and put data into d6 - red, d7 - green, d30 - blue */
    vshrn.u16   d6, q2, #8
    vshrn.u16   d7, q2, #3
    vsli.u16    q2, q2, #5
    vsri.u8     d6, d6, #5
    vsri.u8     d7, d7, #6
    vshrn.u16   d30, q2, #2
    /* now do alpha blending, storing results in 8-bit planar format
       into d16 - red, d19 - green, d18 - blue */
    vmull.u8    q10, d3, d6
    vmull.u8    q11, d3, d7
    vmull.u8    q12, d3, d30
    vrshr.u16   q13, q10, #8
    vrshr.u16   q3, q11, #8
    vrshr.u16   q15, q12, #8
    vraddhn.u16 d20, q10, q13
    vraddhn.u16 d23, q11, q3
    vraddhn.u16 d22, q12, q15
.endm

.macro pixman_composite_over_n_0565_process_pixblock_tail
    /* ... continue alpha blending */
    vqadd.u8    d16, d2, d20
    vqadd.u8    q9, q0, q11
    /* convert the result to r5g6b5 and store it into {d28, d29} */
    vshll.u8    q14, d16, #8
    vshll.u8    q8, d19, #8
    vshll.u8    q9, d18, #8
    vsri.u16    q14, q8, #5
    vsri.u16    q14, q9, #11
.endm

/* TODO: expand macros and do better instruction scheduling */
.macro pixman_composite_over_n_0565_process_pixblock_tail_head
    pixman_composite_over_n_0565_process_pixblock_tail
    vld1.16     {d4, d5}, [DST_R, :128]!
    vst1.16     {d28, d29}, [DST_W, :128]!
    pixman_composite_over_n_0565_process_pixblock_head
    cache_preload 8, 8
.endm

.macro pixman_composite_over_n_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d3[0]}, [DUMMY]
    vdup.8      d0, d3[0]
    vdup.8      d1, d3[1]
    vdup.8      d2, d3[2]
    vdup.8      d3, d3[3]
    vmvn.8      d3, d3      /* invert source alpha */
.endm

generate_composite_function \
    pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_0565_init, \
    default_cleanup, \
    pixman_composite_over_n_0565_process_pixblock_head, \
    pixman_composite_over_n_0565_process_pixblock_tail, \
    pixman_composite_over_n_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    0,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_8888_0565_process_pixblock_head
    vshll.u8    q8, d1, #8
    vshll.u8    q14, d2, #8
    vshll.u8    q9, d0, #8
.endm

.macro pixman_composite_src_8888_0565_process_pixblock_tail
    vsri.u16    q14, q8, #5
    vsri.u16    q14, q9, #11
.endm

.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
    vsri.u16    q14, q8, #5
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    fetch_src_pixblock
    PF addne PF_X, PF_X, #8
    PF subne PF_CTL, PF_CTL, #1
    vsri.u16    q14, q9, #11
    PF cmp PF_X, ORIG_W
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    vshll.u8    q8, d1, #8
    vst1.16     {d28, d29}, [DST_W, :128]!
    PF subge PF_X, PF_X, ORIG_W
    PF subges PF_CTL, PF_CTL, #0x10
    vshll.u8    q14, d2, #8
    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    vshll.u8    q9, d0, #8
.endm

generate_composite_function \
    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_8888_0565_process_pixblock_head, \
    pixman_composite_src_8888_0565_process_pixblock_tail, \
    pixman_composite_src_8888_0565_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_src_0565_8888_process_pixblock_head
    vshrn.u16   d30, q0, #8
    vshrn.u16   d29, q0, #3
    vsli.u16    q0, q0, #5
    vmov.u8     d31, #255
    vsri.u8     d30, d30, #5
    vsri.u8     d29, d29, #6
    vshrn.u16   d28, q0, #2
.endm

.macro pixman_composite_src_0565_8888_process_pixblock_tail
.endm

/* TODO: expand macros and do better instruction scheduling */
.macro pixman_composite_src_0565_8888_process_pixblock_tail_head
    pixman_composite_src_0565_8888_process_pixblock_tail
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    fetch_src_pixblock
    pixman_composite_src_0565_8888_process_pixblock_head
    cache_preload 8, 8
.endm

generate_composite_function \
    pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_8888_process_pixblock_head, \
    pixman_composite_src_0565_8888_process_pixblock_tail, \
    pixman_composite_src_0565_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_add_8_8_process_pixblock_head
    vqadd.u8    q14, q0, q2
    vqadd.u8    q15, q1, q3
.endm

.macro pixman_composite_add_8_8_process_pixblock_tail
.endm

.macro pixman_composite_add_8_8_process_pixblock_tail_head
    fetch_src_pixblock
    PF add PF_X, PF_X, #32
    PF tst PF_CTL, #0xF
    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
    PF addne PF_X, PF_X, #32
    PF subne PF_CTL, PF_CTL, #1
    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
    PF cmp PF_X, ORIG_W
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    PF subge PF_X, PF_X, ORIG_W
    PF subges PF_CTL, PF_CTL, #0x10
    vqadd.u8    q14, q0, q2
    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vqadd.u8    q15, q1, q3
.endm

generate_composite_function \
    pixman_composite_add_8_8_asm_neon, 8, 0, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8_8_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_add_8888_8888_process_pixblock_tail_head
    fetch_src_pixblock
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    vld1.32     {d4, d5, d6, d7}, [DST_R, :128]!
    PF addne PF_X, PF_X, #8
    PF subne PF_CTL, PF_CTL, #1
    vst1.32     {d28, d29, d30, d31}, [DST_W, :128]!
    PF cmp PF_X, ORIG_W
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    PF subge PF_X, PF_X, ORIG_W
    PF subges PF_CTL, PF_CTL, #0x10
    vqadd.u8    q14, q0, q2
    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vqadd.u8    q15, q1, q3
.endm

generate_composite_function \
    pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8888_8888_process_pixblock_tail_head

generate_composite_function_single_scanline \
    pixman_composite_scanline_add_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8888_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
    vmvn.8      d24, d3  /* get inverted alpha */
    /* do alpha blending */
    vmull.u8    q8, d24, d4
    vmull.u8    q9, d24, d5
    vmull.u8    q10, d24, d6
    vmull.u8    q11, d24, d7
.endm

.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q12, q10, #8
    vrshr.u16   q13, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
.endm

.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
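    /* note: each vrshr.u16 + vraddhn.u16 pair below computes
       (x + ((x + 128) >> 8) + 128) >> 8 for the 16-bit products in
       q8-q11, i.e. the usual rounding approximation of x / 255 */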
    vrshr.u16   q14, q8, #8
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    vrshr.u16   q15, q9, #8
    vrshr.u16   q12, q10, #8
    vrshr.u16   q13, q11, #8
    PF addne PF_X, PF_X, #8
    PF subne PF_CTL, PF_CTL, #1
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    PF cmp PF_X, ORIG_W
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
    fetch_src_pixblock
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    vmvn.8      d22, d3
    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    PF subge PF_X, PF_X, ORIG_W
    vmull.u8    q8, d22, d4
    PF subges PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d22, d5
    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    vmull.u8    q10, d22, d6
    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vmull.u8    q11, d22, d7
.endm

generate_composite_function_single_scanline \
    pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_head, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_8888_8888_process_pixblock_head
    pixman_composite_out_reverse_8888_8888_process_pixblock_head
.endm

.macro pixman_composite_over_8888_8888_process_pixblock_tail
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
.endm

.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
    vrshr.u16   q14, q8, #8
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    vrshr.u16   q15, q9, #8
    vrshr.u16   q12, q10, #8
    vrshr.u16   q13, q11, #8
    PF addne PF_X, PF_X, #8
    PF subne PF_CTL, PF_CTL, #1
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    PF cmp PF_X, ORIG_W
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
    fetch_src_pixblock
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    vmvn.8      d22, d3
    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    PF subge PF_X, PF_X, ORIG_W
    vmull.u8    q8, d22, d4
    PF subges PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d22, d5
    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    vmull.u8    q10, d22, d6
    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
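    /* note: the conditional 'ldrgeb' instructions above advance the
       prefetch pointers to the next scanline once PF_X has walked past
       ORIG_W; the loaded byte itself is discarded in DUMMY - the load
       exists only to touch the next scanline's memory, which a plain
       PLD would not reliably do across page boundaries */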
    vmull.u8    q11, d22, d7
.endm

generate_composite_function \
    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_process_pixblock_tail_head

generate_composite_function_single_scanline \
    pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_n_8888_process_pixblock_head
    /* deinterleaved source pixels in {d0, d1, d2, d3} */
    /* inverted alpha in {d24} */
    /* destination pixels in {d4, d5, d6, d7} */
    vmull.u8    q8, d24, d4
    vmull.u8    q9, d24, d5
    vmull.u8    q10, d24, d6
    vmull.u8    q11, d24, d7
.endm

.macro pixman_composite_over_n_8888_process_pixblock_tail
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q2, q10, #8
    vrshr.u16   q3, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q2, q10
    vraddhn.u16 d31, q3, q11
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
.endm

.macro pixman_composite_over_n_8888_process_pixblock_tail_head
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q2, q10, #8
    vrshr.u16   q3, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q2, q10
    vraddhn.u16 d31, q3, q11
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
    vqadd.u8    q14, q0, q14
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0x0F
    PF addne PF_X, PF_X, #8
    PF subne PF_CTL, PF_CTL, #1
    vqadd.u8    q15, q1, q15
    PF cmp PF_X, ORIG_W
    vmull.u8    q8, d24, d4
    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vmull.u8    q9, d24, d5
    PF subge PF_X, PF_X, ORIG_W
    vmull.u8    q10, d24, d6
    PF subges PF_CTL, PF_CTL, #0x10
    vmull.u8    q11, d24, d7
    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
.endm

.macro pixman_composite_over_n_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d3[0]}, [DUMMY]
    vdup.8      d0, d3[0]
    vdup.8      d1, d3[1]
    vdup.8      d2, d3[2]
    vdup.8      d3, d3[3]
    vmvn.8      d24, d3  /* get inverted alpha */
.endm

generate_composite_function \
    pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_n_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head
    vrshr.u16   q14, q8, #8
    PF add PF_X, PF_X, #8
    PF tst PF_CTL, #0xF
    vrshr.u16   q15, q9, #8
    vrshr.u16   q12, q10, #8
    vrshr.u16   q13, q11, #8
    PF addne PF_X, PF_X, #8
    PF subne PF_CTL, PF_CTL, #1
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    PF cmp PF_X, ORIG_W
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
    vld4.8      {d0, d1, d2, d3}, [DST_R, :128]!
    vmvn.8      d22, d3
    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    PF subge PF_X, PF_X, ORIG_W
    vmull.u8    q8, d22, d4
    PF subges PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d22, d5
    vmull.u8    q10, d22, d6
    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vmull.u8    q11, d22, d7
.endm

.macro pixman_composite_over_reverse_n_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d7[0]}, [DUMMY]
    vdup.8      d4, d7[0]
    vdup.8      d5, d7[1]
    vdup.8      d6, d7[2]
    vdup.8      d7, d7[3]
.endm

generate_composite_function \
    pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_reverse_n_8888_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0,  /* dst_r_basereg */ \
    4,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_over_8888_8_0565_process_pixblock_head
    vmull.u8    q0, d24, d8     /* IN for SRC pixels (part1) */
    vmull.u8    q1, d24, d9
    vmull.u8    q6, d24, d10
    vmull.u8    q7, d24, d11
    vshrn.u16   d6, q2, #8      /* convert DST_R data to 32-bpp (part1) */
    vshrn.u16   d7, q2, #3
    vsli.u16    q2, q2, #5
    vrshr.u16   q8, q0, #8      /* IN for SRC pixels (part2) */
    vrshr.u16   q9, q1, #8
    vrshr.u16   q10, q6, #8
    vrshr.u16   q11, q7, #8
    vraddhn.u16 d0, q0, q8
    vraddhn.u16 d1, q1, q9
    vraddhn.u16 d2, q6, q10
    vraddhn.u16 d3, q7, q11
    vsri.u8     d6, d6, #5      /* convert DST_R data to 32-bpp (part2) */
    vsri.u8     d7, d7, #6
    vmvn.8      d3, d3
    vshrn.u16   d30, q2, #2
    vmull.u8    q8, d3, d6      /* now do alpha blending */
    vmull.u8    q9, d3, d7
    vmull.u8    q10, d3, d30
.endm

.macro pixman_composite_over_8888_8_0565_process_pixblock_tail
    /* 3 cycle bubble (after vmull.u8) */
    vrshr.u16   q13, q8, #8
    vrshr.u16   q11, q9, #8
    vrshr.u16   q15, q10, #8
    vraddhn.u16 d16, q8, q13
    vraddhn.u16 d27, q9, q11
    vraddhn.u16 d26, q10, q15
    vqadd.u8    d16, d2, d16
    /* 1 cycle bubble */
    vqadd.u8    q9, q0, q13
    vshll.u8    q14, d16, #8    /* convert to 16bpp */
    vshll.u8    q8, d19, #8
    vshll.u8    q9, d18, #8
    vsri.u16    q14, q8, #5
    /* 1 cycle bubble */
    vsri.u16    q14, q9, #11
.endm

.macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head
    vld1.16     {d4, d5}, [DST_R, :128]!
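    /* note: this macro interleaves three streams: the '*_tail' part
       finishing the previous block, loading/converting the new
       destination and mask data, and the '*_head' part starting the
       next block, so the NEON pipeline stays busy */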
    vshrn.u16   d6, q2, #8
    fetch_mask_pixblock
    vshrn.u16   d7, q2, #3
    fetch_src_pixblock
    vmull.u8    q6, d24, d10
    vrshr.u16   q13, q8, #8
    vrshr.u16   q11, q9, #8
    vrshr.u16   q15, q10, #8
    vraddhn.u16 d16, q8, q13
    vraddhn.u16 d27, q9, q11
    vraddhn.u16 d26, q10, q15
    vqadd.u8    d16, d2, d16
    vmull.u8    q1, d24, d9
    vqadd.u8    q9, q0, q13
    vshll.u8    q14, d16, #8
    vmull.u8    q0, d24, d8
    vshll.u8    q8, d19, #8
    vshll.u8    q9, d18, #8
    vsri.u16    q14, q8, #5
    vmull.u8    q7, d24, d11
    vsri.u16    q14, q9, #11

    cache_preload 8, 8

    vsli.u16    q2, q2, #5
    vrshr.u16   q8, q0, #8
    vrshr.u16   q9, q1, #8
    vrshr.u16   q10, q6, #8
    vrshr.u16   q11, q7, #8
    vraddhn.u16 d0, q0, q8
    vraddhn.u16 d1, q1, q9
    vraddhn.u16 d2, q6, q10
    vraddhn.u16 d3, q7, q11
    vsri.u8     d6, d6, #5
    vsri.u8     d7, d7, #6
    vmvn.8      d3, d3
    vshrn.u16   d30, q2, #2
    vst1.16     {d28, d29}, [DST_W, :128]!
    vmull.u8    q8, d3, d6
    vmull.u8    q9, d3, d7
    vmull.u8    q10, d3, d30
.endm

generate_composite_function \
    pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    8,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/*
 * This function needs a special initialization of the solid mask. The
 * solid source pixel data is fetched from the stack at ARGS_STACK_OFFSET,
 * split into color components and replicated in the d8-d11 registers.
 * Additionally, this function needs all the NEON registers, so it has to
 * save the d8-d15 registers, which are callee-saved according to the ABI.
 * These registers are restored in the 'cleanup' macro. All the other NEON
 * registers are caller-saved, so they can be clobbered freely without
 * introducing any problems.
 */
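/*
 * In C terms the 'init' macro below does, in effect, the following
 * (a sketch; 'splat8' is a hypothetical helper that replicates one byte
 * into all 8 lanes of a d register, and the byte-lane mapping assumes a
 * little-endian ARM, where lane 0 of d11 after the vld1.32 holds the
 * blue component of the packed a8r8g8b8 source color from the stack):
 *
 *   uint32_t s = *(uint32_t *)(sp + ARGS_STACK_OFFSET);
 *   d8  = splat8 (s & 0xff);          // blue
 *   d9  = splat8 ((s >> 8) & 0xff);   // green
 *   d10 = splat8 ((s >> 16) & 0xff);  // red
 *   d11 = splat8 (s >> 24);           // alpha
 */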
.macro pixman_composite_over_n_8_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    .vsave      {d8-d15}
    vpush       {d8-d15}
    vld1.32     {d11[0]}, [DUMMY]
    vdup.8      d8, d11[0]
    vdup.8      d9, d11[1]
    vdup.8      d10, d11[2]
    vdup.8      d11, d11[3]
.endm

.macro pixman_composite_over_n_8_0565_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_0565_init, \
    pixman_composite_over_n_8_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_8888_n_0565_init
    add         DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
    .vsave      {d8-d15}
    vpush       {d8-d15}
    vld1.32     {d24[0]}, [DUMMY]
    vdup.8      d24, d24[3]
.endm

.macro pixman_composite_over_8888_n_0565_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_8888_n_0565_init, \
    pixman_composite_over_8888_n_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    8,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_0565_0565_process_pixblock_head
.endm

.macro pixman_composite_src_0565_0565_process_pixblock_tail
.endm

.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
    vst1.16     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    cache_preload 16, 16
.endm

generate_composite_function \
    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_0565_process_pixblock_head, \
    pixman_composite_src_0565_0565_process_pixblock_tail, \
    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_n_8_process_pixblock_head
.endm

.macro pixman_composite_src_n_8_process_pixblock_tail
.endm

.macro pixman_composite_src_n_8_process_pixblock_tail_head
    vst1.8      {d0, d1, d2, d3}, [DST_W, :128]!
.endm

.macro pixman_composite_src_n_8_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d0[0]}, [DUMMY]
    vsli.u64    d0, d0, #8
    vsli.u64    d0, d0, #16
    vsli.u64    d0, d0, #32
    vorr        d1, d0, d0
    vorr        q1, q0, q0
.endm

.macro pixman_composite_src_n_8_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
    FLAG_DST_WRITEONLY, \
    32, /* number of pixels, processed in a single block */ \
    0,  /* prefetch distance */ \
    pixman_composite_src_n_8_init, \
    pixman_composite_src_n_8_cleanup, \
    pixman_composite_src_n_8_process_pixblock_head, \
    pixman_composite_src_n_8_process_pixblock_tail, \
    pixman_composite_src_n_8_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_n_0565_process_pixblock_head
.endm

.macro pixman_composite_src_n_0565_process_pixblock_tail
.endm

.macro pixman_composite_src_n_0565_process_pixblock_tail_head
    vst1.16     {d0, d1, d2, d3}, [DST_W, :128]!
.endm

.macro pixman_composite_src_n_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d0[0]}, [DUMMY]
    vsli.u64    d0, d0, #16
    vsli.u64    d0, d0, #32
    vorr        d1, d0, d0
    vorr        q1, q0, q0
.endm

.macro pixman_composite_src_n_0565_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    0,  /* prefetch distance */ \
    pixman_composite_src_n_0565_init, \
    pixman_composite_src_n_0565_cleanup, \
    pixman_composite_src_n_0565_process_pixblock_head, \
    pixman_composite_src_n_0565_process_pixblock_tail, \
    pixman_composite_src_n_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_n_8888_process_pixblock_head
.endm

.macro pixman_composite_src_n_8888_process_pixblock_tail
.endm

.macro pixman_composite_src_n_8888_process_pixblock_tail_head
    vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
.endm

.macro pixman_composite_src_n_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d0[0]}, [DUMMY]
    vsli.u64    d0, d0, #32
    vorr        d1, d0, d0
    vorr        q1, q0, q0
.endm

.macro pixman_composite_src_n_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    0, /* prefetch distance */ \
    pixman_composite_src_n_8888_init, \
    pixman_composite_src_n_8888_cleanup, \
    pixman_composite_src_n_8888_process_pixblock_head, \
    pixman_composite_src_n_8888_process_pixblock_tail, \
    pixman_composite_src_n_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_8888_8888_process_pixblock_head
.endm

.macro pixman_composite_src_8888_8888_process_pixblock_tail
.endm

.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
    vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    cache_preload 8, 8
.endm

generate_composite_function \
    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_8888_8888_process_pixblock_head, \
    pixman_composite_src_8888_8888_process_pixblock_tail, \
    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_x888_8888_process_pixblock_head
    vorr        q0, q0, q2
    vorr        q1, q1, q2
.endm

.macro pixman_composite_src_x888_8888_process_pixblock_tail
.endm

.macro pixman_composite_src_x888_8888_process_pixblock_tail_head
    vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    vorr        q0, q0, q2
    vorr        q1, q1, q2
    cache_preload 8, 8
.endm

.macro pixman_composite_src_x888_8888_init
    vmov.u8     q2, #0xFF
    vshl.u32    q2, q2, #24
.endm

generate_composite_function \
    pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    pixman_composite_src_x888_8888_init, \
    default_cleanup, \
    pixman_composite_src_x888_8888_process_pixblock_head, \
    pixman_composite_src_x888_8888_process_pixblock_tail, \
    pixman_composite_src_x888_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_n_8_8888_process_pixblock_head
    /* expecting solid source in {d0, d1, d2, d3} */
    /* mask is in d24 (d25, d26, d27 are unused) */

    /* in */
    vmull.u8    q8, d24, d0
    vmull.u8    q9, d24, d1
    vmull.u8    q10, d24, d2
    vmull.u8    q11, d24, d3
    vrsra.u16   q8, q8, #8
    vrsra.u16   q9, q9, #8
    vrsra.u16   q10, q10, #8
    vrsra.u16   q11, q11, #8
.endm

.macro pixman_composite_src_n_8_8888_process_pixblock_tail
    vrshrn.u16  d28, q8, #8
    vrshrn.u16  d29, q9, #8
    vrshrn.u16  d30, q10, #8
    vrshrn.u16  d31, q11, #8
.endm

.macro pixman_composite_src_n_8_8888_process_pixblock_tail_head
    fetch_mask_pixblock
    PF add PF_X, PF_X, #8
    vrshrn.u16  d28, q8, #8
    PF tst PF_CTL, #0x0F
    vrshrn.u16  d29, q9, #8
    PF addne PF_X, PF_X, #8
    vrshrn.u16  d30, q10, #8
    PF subne PF_CTL, PF_CTL, #1
    vrshrn.u16  d31, q11, #8
    PF cmp PF_X, ORIG_W
    vmull.u8    q8, d24, d0
    PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
    vmull.u8    q9, d24, d1
    PF subge PF_X, PF_X, ORIG_W
    vmull.u8    q10, d24, d2
    PF subges PF_CTL, PF_CTL, #0x10
    vmull.u8    q11, d24, d3
    PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    vrsra.u16   q8, q8, #8
    vrsra.u16   q9, q9, #8
    vrsra.u16   q10, q10, #8
    vrsra.u16   q11, q11, #8
.endm

.macro pixman_composite_src_n_8_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d3[0]}, [DUMMY]
    vdup.8      d0, d3[0]
    vdup.8      d1, d3[1]
    vdup.8      d2, d3[2]
    vdup.8      d3, d3[3]
.endm

.macro pixman_composite_src_n_8_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_src_n_8_8888_init, \
    pixman_composite_src_n_8_8888_cleanup, \
    pixman_composite_src_n_8_8888_process_pixblock_head, \
    pixman_composite_src_n_8_8888_process_pixblock_tail, \
    pixman_composite_src_n_8_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_src_n_8_8_process_pixblock_head
    vmull.u8    q0, d24, d16
    vmull.u8    q1, d25, d16
    vmull.u8    q2, d26, d16
    vmull.u8    q3, d27, d16
    vrsra.u16   q0, q0, #8
    vrsra.u16   q1, q1, #8
    vrsra.u16   q2, q2, #8
    vrsra.u16   q3, q3, #8
.endm

.macro pixman_composite_src_n_8_8_process_pixblock_tail
    vrshrn.u16  d28, q0, #8
    vrshrn.u16  d29, q1, #8
    vrshrn.u16  d30, q2, #8
    vrshrn.u16  d31, q3, #8
.endm

.macro pixman_composite_src_n_8_8_process_pixblock_tail_head
    fetch_mask_pixblock
    PF add PF_X, PF_X, #8
    vrshrn.u16  d28, q0, #8
    PF tst PF_CTL, #0x0F
    vrshrn.u16  d29, q1, #8
    PF addne PF_X, PF_X, #8
    vrshrn.u16  d30, q2, #8
    PF subne PF_CTL, PF_CTL, #1
    vrshrn.u16  d31, q3, #8
    PF cmp PF_X, ORIG_W
    vmull.u8    q0, d24, d16
    PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
    vmull.u8    q1, d25, d16
    PF subge PF_X, PF_X, ORIG_W
    vmull.u8    q2, d26, d16
    PF subges PF_CTL, PF_CTL, #0x10
    vmull.u8    q3, d27, d16
    PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
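    /* note: 'vrsra.u16 q, q, #8' below adds the rounded high byte back
       into each 16-bit product, so together with the final
       'vrshrn.u16 #8' in the tail it forms the same rounding x / 255
       approximation as the vrshr/vraddhn pairs used elsewhere */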
    vrsra.u16   q0, q0, #8
    vrsra.u16   q1, q1, #8
    vrsra.u16   q2, q2, #8
    vrsra.u16   q3, q3, #8
.endm

.macro pixman_composite_src_n_8_8_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d16[0]}, [DUMMY]
    vdup.8      d16, d16[3]
.endm

.macro pixman_composite_src_n_8_8_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \
    FLAG_DST_WRITEONLY, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_src_n_8_8_init, \
    pixman_composite_src_n_8_8_cleanup, \
    pixman_composite_src_n_8_8_process_pixblock_head, \
    pixman_composite_src_n_8_8_process_pixblock_tail, \
    pixman_composite_src_n_8_8_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_n_8_8888_process_pixblock_head
    /* expecting deinterleaved source data in {d8, d9, d10, d11} */
    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
    /* and destination data in {d4, d5, d6, d7} */
    /* mask is in d24 (d25, d26, d27 are unused) */

    /* in */
    vmull.u8    q6, d24, d8
    vmull.u8    q7, d24, d9
    vmull.u8    q8, d24, d10
    vmull.u8    q9, d24, d11
    vrshr.u16   q10, q6, #8
    vrshr.u16   q11, q7, #8
    vrshr.u16   q12, q8, #8
    vrshr.u16   q13, q9, #8
    vraddhn.u16 d0, q6, q10
    vraddhn.u16 d1, q7, q11
    vraddhn.u16 d2, q8, q12
    vraddhn.u16 d3, q9, q13
    vmvn.8      d25, d3  /* get inverted alpha */
    /* source:      d0 - blue, d1 - green, d2 - red, d3 - alpha */
    /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
    /* now do alpha blending */
    vmull.u8    q8, d25, d4
    vmull.u8    q9, d25, d5
    vmull.u8    q10, d25, d6
    vmull.u8    q11, d25, d7
.endm

.macro pixman_composite_over_n_8_8888_process_pixblock_tail
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q6, q10, #8
    vrshr.u16   q7, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q6, q10
    vraddhn.u16 d31, q7, q11
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
.endm

.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
    vrshr.u16   q14, q8, #8
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
michael@0: vrshr.u16 q15, q9, #8 michael@0: fetch_mask_pixblock michael@0: vrshr.u16 q6, q10, #8 michael@0: PF add PF_X, PF_X, #8 michael@0: vrshr.u16 q7, q11, #8 michael@0: PF tst PF_CTL, #0x0F michael@0: vraddhn.u16 d28, q14, q8 michael@0: PF addne PF_X, PF_X, #8 michael@0: vraddhn.u16 d29, q15, q9 michael@0: PF subne PF_CTL, PF_CTL, #1 michael@0: vraddhn.u16 d30, q6, q10 michael@0: PF cmp PF_X, ORIG_W michael@0: vraddhn.u16 d31, q7, q11 michael@0: PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] michael@0: vmull.u8 q6, d24, d8 michael@0: PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift] michael@0: vmull.u8 q7, d24, d9 michael@0: PF subge PF_X, PF_X, ORIG_W michael@0: vmull.u8 q8, d24, d10 michael@0: PF subges PF_CTL, PF_CTL, #0x10 michael@0: vmull.u8 q9, d24, d11 michael@0: PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! michael@0: vqadd.u8 q14, q0, q14 michael@0: PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]! michael@0: vqadd.u8 q15, q1, q15 michael@0: vrshr.u16 q10, q6, #8 michael@0: vrshr.u16 q11, q7, #8 michael@0: vrshr.u16 q12, q8, #8 michael@0: vrshr.u16 q13, q9, #8 michael@0: vraddhn.u16 d0, q6, q10 michael@0: vraddhn.u16 d1, q7, q11 michael@0: vraddhn.u16 d2, q8, q12 michael@0: vraddhn.u16 d3, q9, q13 michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: vmvn.8 d25, d3 michael@0: vmull.u8 q8, d25, d4 michael@0: vmull.u8 q9, d25, d5 michael@0: vmull.u8 q10, d25, d6 michael@0: vmull.u8 q11, d25, d7 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8_8888_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d11[0]}, [DUMMY] michael@0: vdup.8 d8, d11[0] michael@0: vdup.8 d9, d11[1] michael@0: vdup.8 d10, d11[2] michael@0: vdup.8 d11, d11[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8_8888_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_over_n_8_8888_init, \ michael@0: pixman_composite_over_n_8_8888_cleanup, \ michael@0: pixman_composite_over_n_8_8888_process_pixblock_head, \ michael@0: pixman_composite_over_n_8_8888_process_pixblock_tail, \ michael@0: pixman_composite_over_n_8_8888_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_over_n_8_8_process_pixblock_head michael@0: vmull.u8 q0, d24, d8 michael@0: vmull.u8 q1, d25, d8 michael@0: vmull.u8 q6, d26, d8 michael@0: vmull.u8 q7, d27, d8 michael@0: vrshr.u16 q10, q0, #8 michael@0: vrshr.u16 q11, q1, #8 michael@0: vrshr.u16 q12, q6, #8 michael@0: vrshr.u16 q13, q7, #8 michael@0: vraddhn.u16 d0, q0, q10 michael@0: vraddhn.u16 d1, q1, q11 michael@0: vraddhn.u16 d2, q6, q12 michael@0: vraddhn.u16 d3, q7, q13 michael@0: vmvn.8 q12, q0 michael@0: vmvn.8 q13, q1 michael@0: vmull.u8 q8, d24, d4 michael@0: vmull.u8 q9, d25, d5 michael@0: vmull.u8 q10, d26, d6 michael@0: vmull.u8 q11, d27, d7 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8_8_process_pixblock_tail michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q12, q10, #8 michael@0: vrshr.u16 q13, q11, #8 michael@0: vraddhn.u16 d28, q14, q8 michael@0: 
vraddhn.u16 d29, q15, q9 michael@0: vraddhn.u16 d30, q12, q10 michael@0: vraddhn.u16 d31, q13, q11 michael@0: vqadd.u8 q14, q0, q14 michael@0: vqadd.u8 q15, q1, q15 michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_over_n_8_8_process_pixblock_tail_head michael@0: vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: pixman_composite_over_n_8_8_process_pixblock_tail michael@0: fetch_mask_pixblock michael@0: cache_preload 32, 32 michael@0: vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: pixman_composite_over_n_8_8_process_pixblock_head michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8_8_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d8[0]}, [DUMMY] michael@0: vdup.8 d8, d8[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8_8_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 32, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_over_n_8_8_init, \ michael@0: pixman_composite_over_n_8_8_cleanup, \ michael@0: pixman_composite_over_n_8_8_process_pixblock_head, \ michael@0: pixman_composite_over_n_8_8_process_pixblock_tail, \ michael@0: pixman_composite_over_n_8_8_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head michael@0: /* michael@0: * 'combine_mask_ca' replacement michael@0: * michael@0: * input: solid src (n) in {d8, d9, d10, d11} michael@0: * dest in {d4, d5, d6, d7 } michael@0: * mask in {d24, d25, d26, d27} michael@0: * output: updated src in {d0, d1, d2, d3 } michael@0: * updated mask in {d24, d25, d26, d3 } michael@0: */ michael@0: vmull.u8 q0, d24, d8 michael@0: vmull.u8 q1, d25, d9 michael@0: vmull.u8 q6, d26, d10 michael@0: vmull.u8 q7, d27, d11 michael@0: vmull.u8 q9, d11, d25 michael@0: vmull.u8 q12, d11, d24 michael@0: vmull.u8 q13, d11, d26 michael@0: vrshr.u16 q8, q0, #8 michael@0: vrshr.u16 q10, q1, #8 michael@0: vrshr.u16 q11, q6, #8 michael@0: vraddhn.u16 d0, q0, q8 michael@0: vraddhn.u16 d1, q1, q10 michael@0: vraddhn.u16 d2, q6, q11 michael@0: vrshr.u16 q11, q12, #8 michael@0: vrshr.u16 q8, q9, #8 michael@0: vrshr.u16 q6, q13, #8 michael@0: vrshr.u16 q10, q7, #8 michael@0: vraddhn.u16 d24, q12, q11 michael@0: vraddhn.u16 d25, q9, q8 michael@0: vraddhn.u16 d26, q13, q6 michael@0: vraddhn.u16 d3, q7, q10 michael@0: /* michael@0: * 'combine_over_ca' replacement michael@0: * michael@0: * output: updated dest in {d28, d29, d30, d31} michael@0: */ michael@0: vmvn.8 q12, q12 michael@0: vmvn.8 d26, d26 michael@0: vmull.u8 q8, d24, d4 michael@0: vmull.u8 q9, d25, d5 michael@0: vmvn.8 d27, d3 michael@0: vmull.u8 q10, d26, d6 michael@0: vmull.u8 q11, d27, d7 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail michael@0: /* ... 
continue 'combine_over_ca' replacement */ michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q6, q10, #8 michael@0: vrshr.u16 q7, q11, #8 michael@0: vraddhn.u16 d28, q14, q8 michael@0: vraddhn.u16 d29, q15, q9 michael@0: vraddhn.u16 d30, q6, q10 michael@0: vraddhn.u16 d31, q7, q11 michael@0: vqadd.u8 q14, q0, q14 michael@0: vqadd.u8 q15, q1, q15 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: vrshr.u16 q6, q10, #8 michael@0: vrshr.u16 q7, q11, #8 michael@0: vraddhn.u16 d28, q14, q8 michael@0: vraddhn.u16 d29, q15, q9 michael@0: vraddhn.u16 d30, q6, q10 michael@0: vraddhn.u16 d31, q7, q11 michael@0: fetch_mask_pixblock michael@0: vqadd.u8 q14, q0, q14 michael@0: vqadd.u8 q15, q1, q15 michael@0: cache_preload 8, 8 michael@0: pixman_composite_over_n_8888_8888_ca_process_pixblock_head michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_8888_ca_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d11[0]}, [DUMMY] michael@0: vdup.8 d8, d11[0] michael@0: vdup.8 d9, d11[1] michael@0: vdup.8 d10, d11[2] michael@0: vdup.8 d11, d11[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_8888_ca_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_over_n_8888_8888_ca_init, \ michael@0: pixman_composite_over_n_8888_8888_ca_cleanup, \ michael@0: pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \ michael@0: pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \ michael@0: pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head michael@0: /* michael@0: * 'combine_mask_ca' replacement michael@0: * michael@0: * input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A] michael@0: * mask in {d24, d25, d26} [B, G, R] michael@0: * output: updated src in {d0, d1, d2 } [B, G, R] michael@0: * updated mask in {d24, d25, d26} [B, G, R] michael@0: */ michael@0: vmull.u8 q0, d24, d8 michael@0: vmull.u8 q1, d25, d9 michael@0: vmull.u8 q6, d26, d10 michael@0: vmull.u8 q9, d11, d25 michael@0: vmull.u8 q12, d11, d24 michael@0: vmull.u8 q13, d11, d26 michael@0: vrshr.u16 q8, q0, #8 michael@0: vrshr.u16 q10, q1, #8 michael@0: vrshr.u16 q11, q6, #8 michael@0: vraddhn.u16 d0, q0, q8 michael@0: vraddhn.u16 d1, q1, q10 michael@0: vraddhn.u16 d2, q6, q11 michael@0: vrshr.u16 q11, q12, #8 michael@0: vrshr.u16 q8, q9, #8 michael@0: vrshr.u16 q6, q13, #8 michael@0: vraddhn.u16 d24, q12, q11 michael@0: vraddhn.u16 d25, q9, q8 michael@0: /* michael@0: * convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format michael@0: * and put data into d16 - blue, d17 - green, d18 - red michael@0: */ michael@0: vshrn.u16 d17, q2, #3 michael@0: vshrn.u16 d18, q2, #8 michael@0: vraddhn.u16 d26, q13, q6 michael@0: vsli.u16 q2, q2, #5 
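michael@0: /*
michael@0: * The vshrn/vsli/vsri sequence around this point widens each 5- or
michael@0: * 6-bit r5g6b5 field to 8 bits with bit replication, so that 0x1f
michael@0: * expands to 0xff exactly. Roughly, in C (illustrative only):
michael@0: *
michael@0: * r8 = (r5 << 3) | (r5 >> 2);
michael@0: * g8 = (g6 << 2) | (g6 >> 4);
michael@0: * b8 = (b5 << 3) | (b5 >> 2);
michael@0: *
michael@0: * vsri.u8 dX, dX, #5 (or #6 for green) performs the "| field >> n"
michael@0: * part once the field has been shifted to the top of its byte.
michael@0: */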
michael@0: vsri.u8 d18, d18, #5 michael@0: vsri.u8 d17, d17, #6 michael@0: /* michael@0: * 'combine_over_ca' replacement michael@0: * michael@0: * output: updated dest in d16 - blue, d17 - green, d18 - red michael@0: */ michael@0: vmvn.8 q12, q12 michael@0: vshrn.u16 d16, q2, #2 michael@0: vmvn.8 d26, d26 michael@0: vmull.u8 q6, d16, d24 michael@0: vmull.u8 q7, d17, d25 michael@0: vmull.u8 q11, d18, d26 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail michael@0: /* ... continue 'combine_over_ca' replacement */ michael@0: vrshr.u16 q10, q6, #8 michael@0: vrshr.u16 q14, q7, #8 michael@0: vrshr.u16 q15, q11, #8 michael@0: vraddhn.u16 d16, q10, q6 michael@0: vraddhn.u16 d17, q14, q7 michael@0: vraddhn.u16 d18, q15, q11 michael@0: vqadd.u8 q8, q0, q8 michael@0: vqadd.u8 d18, d2, d18 michael@0: /* michael@0: * convert the results in d16, d17, d18 to r5g6b5 and store michael@0: * them into {d28, d29} michael@0: */ michael@0: vshll.u8 q14, d18, #8 michael@0: vshll.u8 q10, d17, #8 michael@0: vshll.u8 q15, d16, #8 michael@0: vsri.u16 q14, q10, #5 michael@0: vsri.u16 q14, q15, #11 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head michael@0: fetch_mask_pixblock michael@0: vrshr.u16 q10, q6, #8 michael@0: vrshr.u16 q14, q7, #8 michael@0: vld1.16 {d4, d5}, [DST_R, :128]! michael@0: vrshr.u16 q15, q11, #8 michael@0: vraddhn.u16 d16, q10, q6 michael@0: vraddhn.u16 d17, q14, q7 michael@0: vraddhn.u16 d22, q15, q11 michael@0: /* process_pixblock_head */ michael@0: /* michael@0: * 'combine_mask_ca' replacement michael@0: * michael@0: * input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A] michael@0: * mask in {d24, d25, d26} [B, G, R] michael@0: * output: updated src in {d0, d1, d2 } [B, G, R] michael@0: * updated mask in {d24, d25, d26} [B, G, R] michael@0: */ michael@0: vmull.u8 q6, d26, d10 michael@0: vqadd.u8 q8, q0, q8 michael@0: vmull.u8 q0, d24, d8 michael@0: vqadd.u8 d22, d2, d22 michael@0: vmull.u8 q1, d25, d9 michael@0: /* michael@0: * convert the result in d16, d17, d22 to r5g6b5 and store michael@0: * it into {d28, d29} michael@0: */ michael@0: vshll.u8 q14, d22, #8 michael@0: vshll.u8 q10, d17, #8 michael@0: vshll.u8 q15, d16, #8 michael@0: vmull.u8 q9, d11, d25 michael@0: vsri.u16 q14, q10, #5 michael@0: vmull.u8 q12, d11, d24 michael@0: vmull.u8 q13, d11, d26 michael@0: vsri.u16 q14, q15, #11 michael@0: cache_preload 8, 8 michael@0: vrshr.u16 q8, q0, #8 michael@0: vrshr.u16 q10, q1, #8 michael@0: vrshr.u16 q11, q6, #8 michael@0: vraddhn.u16 d0, q0, q8 michael@0: vraddhn.u16 d1, q1, q10 michael@0: vraddhn.u16 d2, q6, q11 michael@0: vrshr.u16 q11, q12, #8 michael@0: vrshr.u16 q8, q9, #8 michael@0: vrshr.u16 q6, q13, #8 michael@0: vraddhn.u16 d24, q12, q11 michael@0: vraddhn.u16 d25, q9, q8 michael@0: /* michael@0: * convert 8 r5g6b5 pixel data from {d4, d5} to planar michael@0: * 8-bit format and put data into d16 - blue, d17 - green, michael@0: * d18 - red michael@0: */ michael@0: vshrn.u16 d17, q2, #3 michael@0: vshrn.u16 d18, q2, #8 michael@0: vraddhn.u16 d26, q13, q6 michael@0: vsli.u16 q2, q2, #5 michael@0: vsri.u8 d17, d17, #6 michael@0: vsri.u8 d18, d18, #5 michael@0: /* michael@0: * 'combine_over_ca' replacement michael@0: * michael@0: * output: updated dest in d16 - blue, d17 - green, d18 - red michael@0: */ michael@0: vmvn.8 q12, q12 michael@0: vshrn.u16 d16, q2, #2 michael@0: vmvn.8 d26, d26 michael@0: vmull.u8 q7, d17, d25 michael@0: vmull.u8 q6, d16, d24 michael@0: 
vmull.u8 q11, d18, d26 michael@0: vst1.16 {d28, d29}, [DST_W, :128]! michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_0565_ca_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d11[0]}, [DUMMY] michael@0: vdup.8 d8, d11[0] michael@0: vdup.8 d9, d11[1] michael@0: vdup.8 d10, d11[2] michael@0: vdup.8 d11, d11[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_n_8888_0565_ca_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_over_n_8888_0565_ca_init, \ michael@0: pixman_composite_over_n_8888_0565_ca_cleanup, \ michael@0: pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \ michael@0: pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \ michael@0: pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_in_n_8_process_pixblock_head michael@0: /* expecting source data in {d0, d1, d2, d3} */ michael@0: /* and destination data in {d4, d5, d6, d7} */ michael@0: vmull.u8 q8, d4, d3 michael@0: vmull.u8 q9, d5, d3 michael@0: vmull.u8 q10, d6, d3 michael@0: vmull.u8 q11, d7, d3 michael@0: .endm michael@0: michael@0: .macro pixman_composite_in_n_8_process_pixblock_tail michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q12, q10, #8 michael@0: vrshr.u16 q13, q11, #8 michael@0: vraddhn.u16 d28, q8, q14 michael@0: vraddhn.u16 d29, q9, q15 michael@0: vraddhn.u16 d30, q10, q12 michael@0: vraddhn.u16 d31, q11, q13 michael@0: .endm michael@0: michael@0: .macro pixman_composite_in_n_8_process_pixblock_tail_head michael@0: pixman_composite_in_n_8_process_pixblock_tail michael@0: vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: cache_preload 32, 32 michael@0: pixman_composite_in_n_8_process_pixblock_head michael@0: vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! 
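michael@0: /*
michael@0: * IN with a solid source needs only the source alpha, splatted into
michael@0: * every lane of d3 by the init macro below: each destination byte is
michael@0: * scaled as d = d * a / 255 with the usual rounding, i.e. roughly
michael@0: *
michael@0: * t = d * a + 128; d = (t + (t >> 8)) >> 8;
michael@0: *
michael@0: * in C (illustrative only). Since everything is 8bpp, a single block
michael@0: * covers 32 pixels.
michael@0: */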
michael@0: .endm michael@0: michael@0: .macro pixman_composite_in_n_8_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: vld1.32 {d3[0]}, [DUMMY] michael@0: vdup.8 d3, d3[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_in_n_8_cleanup michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_in_n_8_asm_neon, 0, 0, 8, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 32, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_in_n_8_init, \ michael@0: pixman_composite_in_n_8_cleanup, \ michael@0: pixman_composite_in_n_8_process_pixblock_head, \ michael@0: pixman_composite_in_n_8_process_pixblock_tail, \ michael@0: pixman_composite_in_n_8_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 24 /* mask_basereg */ michael@0: michael@0: .macro pixman_composite_add_n_8_8_process_pixblock_head michael@0: /* expecting source data in {d8, d9, d10, d11} */ michael@0: /* d8 - blue, d9 - green, d10 - red, d11 - alpha */ michael@0: /* and destination data in {d4, d5, d6, d7} */ michael@0: /* mask is in d24, d25, d26, d27 */ michael@0: vmull.u8 q0, d24, d11 michael@0: vmull.u8 q1, d25, d11 michael@0: vmull.u8 q6, d26, d11 michael@0: vmull.u8 q7, d27, d11 michael@0: vrshr.u16 q10, q0, #8 michael@0: vrshr.u16 q11, q1, #8 michael@0: vrshr.u16 q12, q6, #8 michael@0: vrshr.u16 q13, q7, #8 michael@0: vraddhn.u16 d0, q0, q10 michael@0: vraddhn.u16 d1, q1, q11 michael@0: vraddhn.u16 d2, q6, q12 michael@0: vraddhn.u16 d3, q7, q13 michael@0: vqadd.u8 q14, q0, q2 michael@0: vqadd.u8 q15, q1, q3 michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_n_8_8_process_pixblock_tail michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_add_n_8_8_process_pixblock_tail_head michael@0: pixman_composite_add_n_8_8_process_pixblock_tail michael@0: vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! 
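michael@0: /*
michael@0: * Masked ADD: the a8 mask block (d24-d27) is scaled by the solid
michael@0: * source alpha (d11) and then added to the destination with per-byte
michael@0: * saturation. Roughly, in C (illustrative only):
michael@0: *
michael@0: * d = MIN (255, d + m * a / 255);
michael@0: *
michael@0: * where vqadd.u8 supplies the clamp, so no overflow check is needed.
michael@0: */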
michael@0: fetch_mask_pixblock michael@0: cache_preload 32, 32 michael@0: pixman_composite_add_n_8_8_process_pixblock_head michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_n_8_8_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d11[0]}, [DUMMY] michael@0: vdup.8 d11, d11[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_n_8_8_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 32, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_add_n_8_8_init, \ michael@0: pixman_composite_add_n_8_8_cleanup, \ michael@0: pixman_composite_add_n_8_8_process_pixblock_head, \ michael@0: pixman_composite_add_n_8_8_process_pixblock_tail, \ michael@0: pixman_composite_add_n_8_8_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_add_8_8_8_process_pixblock_head michael@0: /* expecting source data in {d0, d1, d2, d3} */ michael@0: /* destination data in {d4, d5, d6, d7} */ michael@0: /* mask in {d24, d25, d26, d27} */ michael@0: vmull.u8 q8, d24, d0 michael@0: vmull.u8 q9, d25, d1 michael@0: vmull.u8 q10, d26, d2 michael@0: vmull.u8 q11, d27, d3 michael@0: vrshr.u16 q0, q8, #8 michael@0: vrshr.u16 q1, q9, #8 michael@0: vrshr.u16 q12, q10, #8 michael@0: vrshr.u16 q13, q11, #8 michael@0: vraddhn.u16 d0, q0, q8 michael@0: vraddhn.u16 d1, q1, q9 michael@0: vraddhn.u16 d2, q12, q10 michael@0: vraddhn.u16 d3, q13, q11 michael@0: vqadd.u8 q14, q0, q2 michael@0: vqadd.u8 q15, q1, q3 michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_8_8_8_process_pixblock_tail michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_add_8_8_8_process_pixblock_tail_head michael@0: pixman_composite_add_8_8_8_process_pixblock_tail michael@0: vst1.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: vld1.8 {d4, d5, d6, d7}, [DST_R, :128]! 
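michael@0: /*
michael@0: * This is the plain, unscheduled tail_head shape flagged by the TODO
michael@0: * above: finish the previous pixel block (tail), store its result,
michael@0: * then load and fetch everything for the next block and start it
michael@0: * (head). It is correct but leaves load/store latencies exposed; the
michael@0: * hand-interleaved tail_head macros elsewhere in this file hide them
michael@0: * by mixing the tail of block N with the head of block N + 1.
michael@0: */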
michael@0: fetch_mask_pixblock michael@0: fetch_src_pixblock michael@0: cache_preload 32, 32 michael@0: pixman_composite_add_8_8_8_process_pixblock_head michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_8_8_8_init michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_8_8_8_cleanup michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 32, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_add_8_8_8_init, \ michael@0: pixman_composite_add_8_8_8_cleanup, \ michael@0: pixman_composite_add_8_8_8_process_pixblock_head, \ michael@0: pixman_composite_add_8_8_8_process_pixblock_tail, \ michael@0: pixman_composite_add_8_8_8_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_add_8888_8888_8888_process_pixblock_head michael@0: /* expecting source data in {d0, d1, d2, d3} */ michael@0: /* destination data in {d4, d5, d6, d7} */ michael@0: /* mask in {d24, d25, d26, d27} */ michael@0: vmull.u8 q8, d27, d0 michael@0: vmull.u8 q9, d27, d1 michael@0: vmull.u8 q10, d27, d2 michael@0: vmull.u8 q11, d27, d3 michael@0: /* 1 cycle bubble */ michael@0: vrsra.u16 q8, q8, #8 michael@0: vrsra.u16 q9, q9, #8 michael@0: vrsra.u16 q10, q10, #8 michael@0: vrsra.u16 q11, q11, #8 michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_8888_8888_8888_process_pixblock_tail michael@0: /* 2 cycle bubble */ michael@0: vrshrn.u16 d28, q8, #8 michael@0: vrshrn.u16 d29, q9, #8 michael@0: vrshrn.u16 d30, q10, #8 michael@0: vrshrn.u16 d31, q11, #8 michael@0: vqadd.u8 q14, q2, q14 michael@0: /* 1 cycle bubble */ michael@0: vqadd.u8 q15, q3, q15 michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head michael@0: fetch_src_pixblock michael@0: vrshrn.u16 d28, q8, #8 michael@0: fetch_mask_pixblock michael@0: vrshrn.u16 d29, q9, #8 michael@0: vmull.u8 q8, d27, d0 michael@0: vrshrn.u16 d30, q10, #8 michael@0: vmull.u8 q9, d27, d1 michael@0: vrshrn.u16 d31, q11, #8 michael@0: vmull.u8 q10, d27, d2 michael@0: vqadd.u8 q14, q2, q14 michael@0: vmull.u8 q11, d27, d3 michael@0: vqadd.u8 q15, q3, q15 michael@0: vrsra.u16 q8, q8, #8 michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: vrsra.u16 q9, q9, #8 michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
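michael@0: /*
michael@0: * The "1 cycle bubble" / "2 cycle bubble" notes in the head and tail
michael@0: * macros above mark result latencies of the preceding NEON
michael@0: * instructions. This interleaved variant fills those slots with the
michael@0: * vld4/vst4 accesses and the cache_preload code instead of letting
michael@0: * the pipeline stall on them.
michael@0: */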
michael@0: vrsra.u16 q10, q10, #8 michael@0: michael@0: cache_preload 8, 8 michael@0: michael@0: vrsra.u16 q11, q11, #8 michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 10, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_head, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail_head michael@0: michael@0: generate_composite_function_single_scanline \ michael@0: pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_head, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_head, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 27 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_add_n_8_8888_init michael@0: add DUMMY, sp, #ARGS_STACK_OFFSET michael@0: vld1.32 {d3[0]}, [DUMMY] michael@0: vdup.8 d0, d3[0] michael@0: vdup.8 d1, d3[1] michael@0: vdup.8 d2, d3[2] michael@0: vdup.8 d3, d3[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_n_8_8888_cleanup michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_add_n_8_8888_init, \ michael@0: pixman_composite_add_n_8_8888_cleanup, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_head, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 27 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_add_8888_n_8888_init michael@0: add DUMMY, sp, #(ARGS_STACK_OFFSET + 8) michael@0: vld1.32 {d27[0]}, [DUMMY] michael@0: vdup.8 d27, d27[3] michael@0: 
.endm michael@0: michael@0: .macro pixman_composite_add_8888_n_8888_cleanup michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_add_8888_n_8888_init, \ michael@0: pixman_composite_add_8888_n_8888_cleanup, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_head, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail, \ michael@0: pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 27 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head michael@0: /* expecting source data in {d0, d1, d2, d3} */ michael@0: /* destination data in {d4, d5, d6, d7} */ michael@0: /* solid mask is in d15 */ michael@0: michael@0: /* 'in' */ michael@0: vmull.u8 q8, d15, d3 michael@0: vmull.u8 q6, d15, d2 michael@0: vmull.u8 q5, d15, d1 michael@0: vmull.u8 q4, d15, d0 michael@0: vrshr.u16 q13, q8, #8 michael@0: vrshr.u16 q12, q6, #8 michael@0: vrshr.u16 q11, q5, #8 michael@0: vrshr.u16 q10, q4, #8 michael@0: vraddhn.u16 d3, q8, q13 michael@0: vraddhn.u16 d2, q6, q12 michael@0: vraddhn.u16 d1, q5, q11 michael@0: vraddhn.u16 d0, q4, q10 michael@0: vmvn.8 d24, d3 /* get inverted alpha */ michael@0: /* now do alpha blending */ michael@0: vmull.u8 q8, d24, d4 michael@0: vmull.u8 q9, d24, d5 michael@0: vmull.u8 q10, d24, d6 michael@0: vmull.u8 q11, d24, d7 michael@0: .endm michael@0: michael@0: .macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q12, q10, #8 michael@0: vrshr.u16 q13, q11, #8 michael@0: vraddhn.u16 d28, q14, q8 michael@0: vraddhn.u16 d29, q15, q9 michael@0: vraddhn.u16 d30, q12, q10 michael@0: vraddhn.u16 d31, q13, q11 michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail michael@0: fetch_src_pixblock michael@0: cache_preload 8, 8 michael@0: fetch_mask_pixblock michael@0: pixman_composite_out_reverse_8888_n_8888_process_pixblock_head michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
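michael@0: /*
michael@0: * OUT_REVERSE keeps only the part of the destination not covered by
michael@0: * the masked source. Per channel, with a1 = alpha (s * m / 255):
michael@0: *
michael@0: * d = d * (255 - a1) / 255;
michael@0: *
michael@0: * (rough C sketch, illustrative only). vmvn.8 computes the inverted
michael@0: * alpha and the usual rounded /255 pattern does the rest; note there
michael@0: * is no add-back of the source here.
michael@0: */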
michael@0: .endm michael@0: michael@0: generate_composite_function_single_scanline \ michael@0: pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \ michael@0: pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \ michael@0: pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 12 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_over_8888_n_8888_process_pixblock_head michael@0: pixman_composite_out_reverse_8888_n_8888_process_pixblock_head michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_8888_n_8888_process_pixblock_tail michael@0: pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail michael@0: vqadd.u8 q14, q0, q14 michael@0: vqadd.u8 q15, q1, q15 michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail michael@0: fetch_src_pixblock michael@0: cache_preload 8, 8 michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_8888_n_8888_init michael@0: add DUMMY, sp, #48 michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d15[0]}, [DUMMY] michael@0: vdup.8 d15, d15[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_8888_n_8888_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_over_8888_n_8888_init, \ michael@0: pixman_composite_over_8888_n_8888_cleanup, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail_head michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail michael@0: fetch_src_pixblock michael@0: cache_preload 8, 8 michael@0: fetch_mask_pixblock michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
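michael@0: /*
michael@0: * OVER is layered on top of OUT_REVERSE: the shared head computes
michael@0: * d * (255 - a1) / 255, and the over tail merely adds the masked
michael@0: * source back with vqadd.u8, i.e. roughly
michael@0: * d = sat8 (s1 + d * (255 - a1) / 255). The saturating add guards
michael@0: * against per-channel rounding pushing premultiplied data past 0xff.
michael@0: */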
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail, \ michael@0: pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 12 /* mask_basereg */ michael@0: michael@0: generate_composite_function_single_scanline \ michael@0: pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail, \ michael@0: pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 12 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail michael@0: fetch_src_pixblock michael@0: cache_preload 8, 8 michael@0: fetch_mask_pixblock michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_head, \ michael@0: pixman_composite_over_8888_n_8888_process_pixblock_tail, \ michael@0: pixman_composite_over_8888_8_8888_process_pixblock_tail_head \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 15 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_src_0888_0888_process_pixblock_head michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_0888_process_pixblock_tail michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_0888_process_pixblock_tail_head michael@0: vst3.8 {d0, d1, d2}, [DST_W]! 
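michael@0: /*
michael@0: * Plain 24bpp copy: head and tail are empty, and the tail_head
michael@0: * simply stores the previous 8 pixels and fetches the next 8, three
michael@0: * bytes per pixel, while cache_preload keeps the read stream ahead
michael@0: * of the writes.
michael@0: */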
michael@0: fetch_src_pixblock michael@0: cache_preload 8, 8 michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \ michael@0: FLAG_DST_WRITEONLY, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 10, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_0888_0888_process_pixblock_head, \ michael@0: pixman_composite_src_0888_0888_process_pixblock_tail, \ michael@0: pixman_composite_src_0888_0888_process_pixblock_tail_head, \ michael@0: 0, /* dst_w_basereg */ \ michael@0: 0, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_src_0888_8888_rev_process_pixblock_head michael@0: vswp d0, d2 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_8888_rev_process_pixblock_tail michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head michael@0: vst4.8 {d0, d1, d2, d3}, [DST_W]! michael@0: fetch_src_pixblock michael@0: vswp d0, d2 michael@0: cache_preload 8, 8 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_8888_rev_init michael@0: veor d3, d3, d3 michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \ michael@0: FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 10, /* prefetch distance */ \ michael@0: pixman_composite_src_0888_8888_rev_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_0888_8888_rev_process_pixblock_head, \ michael@0: pixman_composite_src_0888_8888_rev_process_pixblock_tail, \ michael@0: pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \ michael@0: 0, /* dst_w_basereg */ \ michael@0: 0, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_src_0888_0565_rev_process_pixblock_head michael@0: vshll.u8 q8, d1, #8 michael@0: vshll.u8 q9, d2, #8 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_0565_rev_process_pixblock_tail michael@0: vshll.u8 q14, d0, #8 michael@0: vsri.u16 q14, q8, #5 michael@0: vsri.u16 q14, q9, #11 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head michael@0: vshll.u8 q14, d0, #8 michael@0: fetch_src_pixblock michael@0: vsri.u16 q14, q8, #5 michael@0: vsri.u16 q14, q9, #11 michael@0: vshll.u8 q8, d1, #8 michael@0: vst1.16 {d28, d29}, [DST_W, :128]! 
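michael@0: /*
michael@0: * Packing planar 8-bit channels back into r5g6b5 takes three
michael@0: * instructions: vshll.u8 qN, dX, #8 places a channel in the top byte
michael@0: * of each 16-bit lane, then two vsri.u16 shifts insert the remaining
michael@0: * channels below it. Roughly, in C (illustrative only):
michael@0: *
michael@0: * p = ((c0 >> 3) << 11) | ((c1 >> 2) << 5) | (c2 >> 3);
michael@0: *
michael@0: * the truncation of the low bits happens naturally as each inserted
michael@0: * value is shifted right past them.
michael@0: */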
michael@0: vshll.u8 q9, d2, #8 michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \ michael@0: FLAG_DST_WRITEONLY, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 10, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_0888_0565_rev_process_pixblock_head, \ michael@0: pixman_composite_src_0888_0565_rev_process_pixblock_tail, \ michael@0: pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 0, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_src_pixbuf_8888_process_pixblock_head michael@0: vmull.u8 q8, d3, d0 michael@0: vmull.u8 q9, d3, d1 michael@0: vmull.u8 q10, d3, d2 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_pixbuf_8888_process_pixblock_tail michael@0: vrshr.u16 q11, q8, #8 michael@0: vswp d3, d31 michael@0: vrshr.u16 q12, q9, #8 michael@0: vrshr.u16 q13, q10, #8 michael@0: vraddhn.u16 d30, q11, q8 michael@0: vraddhn.u16 d29, q12, q9 michael@0: vraddhn.u16 d28, q13, q10 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head michael@0: vrshr.u16 q11, q8, #8 michael@0: vswp d3, d31 michael@0: vrshr.u16 q12, q9, #8 michael@0: vrshr.u16 q13, q10, #8 michael@0: fetch_src_pixblock michael@0: vraddhn.u16 d30, q11, q8 michael@0: PF add PF_X, PF_X, #8 michael@0: PF tst PF_CTL, #0xF michael@0: PF addne PF_X, PF_X, #8 michael@0: PF subne PF_CTL, PF_CTL, #1 michael@0: vraddhn.u16 d29, q12, q9 michael@0: vraddhn.u16 d28, q13, q10 michael@0: vmull.u8 q8, d3, d0 michael@0: vmull.u8 q9, d3, d1 michael@0: vmull.u8 q10, d3, d2 michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: PF cmp PF_X, ORIG_W michael@0: PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] michael@0: PF subge PF_X, PF_X, ORIG_W michael@0: PF subges PF_CTL, PF_CTL, #0x10 michael@0: PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! 
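michael@0: /*
michael@0: * src_pixbuf converts a non-premultiplied pixel buffer into
michael@0: * premultiplied a8r8g8b8: the three color channels are multiplied by
michael@0: * the pixel's own alpha (d3) with the rounded /255, the vswp routes
michael@0: * alpha through to d31 unchanged, and the result registers are
michael@0: * assigned in reverse order so that R and B are swapped on the way
michael@0: * out. The rpixbuf variant below is identical except that it keeps
michael@0: * the original channel order.
michael@0: */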
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \ michael@0: FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 10, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_pixbuf_8888_process_pixblock_head, \ michael@0: pixman_composite_src_pixbuf_8888_process_pixblock_tail, \ michael@0: pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 0, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_src_rpixbuf_8888_process_pixblock_head michael@0: vmull.u8 q8, d3, d0 michael@0: vmull.u8 q9, d3, d1 michael@0: vmull.u8 q10, d3, d2 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail michael@0: vrshr.u16 q11, q8, #8 michael@0: vswp d3, d31 michael@0: vrshr.u16 q12, q9, #8 michael@0: vrshr.u16 q13, q10, #8 michael@0: vraddhn.u16 d28, q11, q8 michael@0: vraddhn.u16 d29, q12, q9 michael@0: vraddhn.u16 d30, q13, q10 michael@0: .endm michael@0: michael@0: .macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head michael@0: vrshr.u16 q11, q8, #8 michael@0: vswp d3, d31 michael@0: vrshr.u16 q12, q9, #8 michael@0: vrshr.u16 q13, q10, #8 michael@0: fetch_src_pixblock michael@0: vraddhn.u16 d28, q11, q8 michael@0: PF add PF_X, PF_X, #8 michael@0: PF tst PF_CTL, #0xF michael@0: PF addne PF_X, PF_X, #8 michael@0: PF subne PF_CTL, PF_CTL, #1 michael@0: vraddhn.u16 d29, q12, q9 michael@0: vraddhn.u16 d30, q13, q10 michael@0: vmull.u8 q8, d3, d0 michael@0: vmull.u8 q9, d3, d1 michael@0: vmull.u8 q10, d3, d2 michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! michael@0: PF cmp PF_X, ORIG_W michael@0: PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] michael@0: PF subge PF_X, PF_X, ORIG_W michael@0: PF subges PF_CTL, PF_CTL, #0x10 michael@0: PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! 
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \ michael@0: FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 10, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_rpixbuf_8888_process_pixblock_head, \ michael@0: pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \ michael@0: pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 0, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_over_0565_8_0565_process_pixblock_head michael@0: /* mask is in d15 */ michael@0: convert_0565_to_x888 q4, d2, d1, d0 michael@0: convert_0565_to_x888 q5, d6, d5, d4 michael@0: /* source pixel data is in {d0, d1, d2, XX} */ michael@0: /* destination pixel data is in {d4, d5, d6, XX} */ michael@0: vmvn.8 d7, d15 michael@0: vmull.u8 q6, d15, d2 michael@0: vmull.u8 q5, d15, d1 michael@0: vmull.u8 q4, d15, d0 michael@0: vmull.u8 q8, d7, d4 michael@0: vmull.u8 q9, d7, d5 michael@0: vmull.u8 q13, d7, d6 michael@0: vrshr.u16 q12, q6, #8 michael@0: vrshr.u16 q11, q5, #8 michael@0: vrshr.u16 q10, q4, #8 michael@0: vraddhn.u16 d2, q6, q12 michael@0: vraddhn.u16 d1, q5, q11 michael@0: vraddhn.u16 d0, q4, q10 michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_0565_8_0565_process_pixblock_tail michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q12, q13, #8 michael@0: vraddhn.u16 d28, q14, q8 michael@0: vraddhn.u16 d29, q15, q9 michael@0: vraddhn.u16 d30, q12, q13 michael@0: vqadd.u8 q0, q0, q14 michael@0: vqadd.u8 q1, q1, q15 michael@0: /* 32bpp result is in {d0, d1, d2, XX} */ michael@0: convert_8888_to_0565 d2, d1, d0, q14, q15, q3 michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head michael@0: fetch_mask_pixblock michael@0: pixman_composite_over_0565_8_0565_process_pixblock_tail michael@0: fetch_src_pixblock michael@0: vld1.16 {d10, d11}, [DST_R, :128]! michael@0: cache_preload 8, 8 michael@0: pixman_composite_over_0565_8_0565_process_pixblock_head michael@0: vst1.16 {d28, d29}, [DST_W, :128]! 
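michael@0: /*
michael@0: * There is no packed-arithmetic shortcut for OVER on r5g6b5: source
michael@0: * and destination are first widened to planar x888
michael@0: * (convert_0565_to_x888), blended exactly like the 32bpp case with
michael@0: * the a8 mask in d15, and packed back with convert_8888_to_0565.
michael@0: * The d8-d15 scratch registers this requires are the reason for the
michael@0: * need_all_regs init/cleanup in the generator call below.
michael@0: */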
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_over_0565_8_0565_process_pixblock_head, \ michael@0: pixman_composite_over_0565_8_0565_process_pixblock_tail, \ michael@0: pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 10, /* dst_r_basereg */ \ michael@0: 8, /* src_basereg */ \ michael@0: 15 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_over_0565_n_0565_init michael@0: add DUMMY, sp, #(ARGS_STACK_OFFSET + 8) michael@0: .vsave {d8-d15} michael@0: vpush {d8-d15} michael@0: vld1.32 {d15[0]}, [DUMMY] michael@0: vdup.8 d15, d15[3] michael@0: .endm michael@0: michael@0: .macro pixman_composite_over_0565_n_0565_cleanup michael@0: vpop {d8-d15} michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: pixman_composite_over_0565_n_0565_init, \ michael@0: pixman_composite_over_0565_n_0565_cleanup, \ michael@0: pixman_composite_over_0565_8_0565_process_pixblock_head, \ michael@0: pixman_composite_over_0565_8_0565_process_pixblock_tail, \ michael@0: pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 10, /* dst_r_basereg */ \ michael@0: 8, /* src_basereg */ \ michael@0: 15 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_add_0565_8_0565_process_pixblock_head michael@0: /* mask is in d15 */ michael@0: convert_0565_to_x888 q4, d2, d1, d0 michael@0: convert_0565_to_x888 q5, d6, d5, d4 michael@0: /* source pixel data is in {d0, d1, d2, XX} */ michael@0: /* destination pixel data is in {d4, d5, d6, XX} */ michael@0: vmull.u8 q6, d15, d2 michael@0: vmull.u8 q5, d15, d1 michael@0: vmull.u8 q4, d15, d0 michael@0: vrshr.u16 q12, q6, #8 michael@0: vrshr.u16 q11, q5, #8 michael@0: vrshr.u16 q10, q4, #8 michael@0: vraddhn.u16 d2, q6, q12 michael@0: vraddhn.u16 d1, q5, q11 michael@0: vraddhn.u16 d0, q4, q10 michael@0: .endm michael@0: michael@0: .macro pixman_composite_add_0565_8_0565_process_pixblock_tail michael@0: vqadd.u8 q0, q0, q2 michael@0: vqadd.u8 q1, q1, q3 michael@0: /* 32bpp result is in {d0, d1, d2, XX} */ michael@0: convert_8888_to_0565 d2, d1, d0, q14, q15, q3 michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head michael@0: fetch_mask_pixblock michael@0: pixman_composite_add_0565_8_0565_process_pixblock_tail michael@0: fetch_src_pixblock michael@0: vld1.16 {d10, d11}, [DST_R, :128]! michael@0: cache_preload 8, 8 michael@0: pixman_composite_add_0565_8_0565_process_pixblock_head michael@0: vst1.16 {d28, d29}, [DST_W, :128]! 
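michael@0: /*
michael@0: * Even a plain masked ADD has to take r5g6b5 pixels apart first:
michael@0: * vqadd.u8 saturates per 8-bit lane, while adding packed 5:6:5 words
michael@0: * directly would carry across field boundaries and clamp at the
michael@0: * wrong values. So the pipeline is again unpack, scale the source by
michael@0: * the mask, vqadd, repack.
michael@0: */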
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_add_0565_8_0565_process_pixblock_head, \ michael@0: pixman_composite_add_0565_8_0565_process_pixblock_tail, \ michael@0: pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 10, /* dst_r_basereg */ \ michael@0: 8, /* src_basereg */ \ michael@0: 15 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_out_reverse_8_0565_process_pixblock_head michael@0: /* mask is in d15 */ michael@0: convert_0565_to_x888 q5, d6, d5, d4 michael@0: /* destination pixel data is in {d4, d5, d6, xx} */ michael@0: vmvn.8 d24, d15 /* get inverted alpha */ michael@0: /* now do alpha blending */ michael@0: vmull.u8 q8, d24, d4 michael@0: vmull.u8 q9, d24, d5 michael@0: vmull.u8 q10, d24, d6 michael@0: .endm michael@0: michael@0: .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q12, q10, #8 michael@0: vraddhn.u16 d0, q14, q8 michael@0: vraddhn.u16 d1, q15, q9 michael@0: vraddhn.u16 d2, q12, q10 michael@0: /* 32bpp result is in {d0, d1, d2, XX} */ michael@0: convert_8888_to_0565 d2, d1, d0, q14, q15, q3 michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head michael@0: fetch_src_pixblock michael@0: pixman_composite_out_reverse_8_0565_process_pixblock_tail michael@0: vld1.16 {d10, d11}, [DST_R, :128]! michael@0: cache_preload 8, 8 michael@0: pixman_composite_out_reverse_8_0565_process_pixblock_head michael@0: vst1.16 {d28, d29}, [DST_W, :128]! 
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \ michael@0: FLAG_DST_READWRITE, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_out_reverse_8_0565_process_pixblock_head, \ michael@0: pixman_composite_out_reverse_8_0565_process_pixblock_tail, \ michael@0: pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 10, /* dst_r_basereg */ \ michael@0: 15, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: .macro pixman_composite_out_reverse_8_8888_process_pixblock_head michael@0: /* src is in d0 */ michael@0: /* destination pixel data is in {d4, d5, d6, d7} */ michael@0: vmvn.8 d1, d0 /* get inverted alpha */ michael@0: /* now do alpha blending */ michael@0: vmull.u8 q8, d1, d4 michael@0: vmull.u8 q9, d1, d5 michael@0: vmull.u8 q10, d1, d6 michael@0: vmull.u8 q11, d1, d7 michael@0: .endm michael@0: michael@0: .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail michael@0: vrshr.u16 q14, q8, #8 michael@0: vrshr.u16 q15, q9, #8 michael@0: vrshr.u16 q12, q10, #8 michael@0: vrshr.u16 q13, q11, #8 michael@0: vraddhn.u16 d28, q14, q8 michael@0: vraddhn.u16 d29, q15, q9 michael@0: vraddhn.u16 d30, q12, q10 michael@0: vraddhn.u16 d31, q13, q11 michael@0: /* 32bpp result is in {d28, d29, d30, d31} */ michael@0: .endm michael@0: michael@0: /* TODO: expand macros and do better instructions scheduling */ michael@0: .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head michael@0: fetch_src_pixblock michael@0: pixman_composite_out_reverse_8_8888_process_pixblock_tail michael@0: vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! michael@0: cache_preload 8, 8 michael@0: pixman_composite_out_reverse_8_8888_process_pixblock_head michael@0: vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
michael@0: .endm michael@0: michael@0: generate_composite_function \ michael@0: pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: 5, /* prefetch distance */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_out_reverse_8_8888_process_pixblock_head, \ michael@0: pixman_composite_out_reverse_8_8888_process_pixblock_tail, \ michael@0: pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 0 /* mask_basereg */ michael@0: michael@0: /******************************************************************************/ michael@0: michael@0: generate_composite_function_nearest_scanline \ michael@0: pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_over_8888_8888_process_pixblock_head, \ michael@0: pixman_composite_over_8888_8888_process_pixblock_tail, \ michael@0: pixman_composite_over_8888_8888_process_pixblock_tail_head michael@0: michael@0: generate_composite_function_nearest_scanline \ michael@0: pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_over_8888_0565_process_pixblock_head, \ michael@0: pixman_composite_over_8888_0565_process_pixblock_tail, \ michael@0: pixman_composite_over_8888_0565_process_pixblock_tail_head, \ michael@0: 28, /* dst_w_basereg */ \ michael@0: 4, /* dst_r_basereg */ \ michael@0: 0, /* src_basereg */ \ michael@0: 24 /* mask_basereg */ michael@0: michael@0: generate_composite_function_nearest_scanline \ michael@0: pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \ michael@0: FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_8888_0565_process_pixblock_head, \ michael@0: pixman_composite_src_8888_0565_process_pixblock_tail, \ michael@0: pixman_composite_src_8888_0565_process_pixblock_tail_head michael@0: michael@0: generate_composite_function_nearest_scanline \ michael@0: pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \ michael@0: FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init, \ michael@0: default_cleanup, \ michael@0: pixman_composite_src_0565_8888_process_pixblock_head, \ michael@0: pixman_composite_src_0565_8888_process_pixblock_tail, \ michael@0: pixman_composite_src_0565_8888_process_pixblock_tail_head michael@0: michael@0: generate_composite_function_nearest_scanline \ michael@0: pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \ michael@0: FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ michael@0: 8, /* number of pixels, processed in a single block */ \ michael@0: default_init_need_all_regs, \ michael@0: default_cleanup_need_all_regs, \ michael@0: pixman_composite_over_8888_8_0565_process_pixblock_head, \ 

/******************************************************************************/

/* Supplementary macro for setting function attributes */
.macro pixman_asm_function fname
    .func fname
    .global fname
#ifdef __ELF__
    .hidden fname
    .type fname, %function
#endif
fname:
.endm

/*
 * Bilinear scaling support code which tries to provide pixel fetching, color
 * format conversion, and interpolation as separate macros which can be used
 * as the basic building blocks for constructing bilinear scanline functions.
 */
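
/*
 * A rough per-channel C model of the arithmetic the macros below implement
 * (cf. the portable bilinear code in pixman-inlines.h; this sketch is
 * illustrative, not the exact pixman implementation). 'wt' and 'wb' are the
 * vertical weights for the top/bottom scanlines, summing to
 * (1 << BILINEAR_INTERPOLATION_BITS), and 'x' is the 16.16 fixed-point
 * source coordinate:
 *
 *     #define BITS BILINEAR_INTERPOLATION_BITS
 *     #define SIZE (1 << BITS)
 *
 *     static uint8_t bilinear_channel (uint8_t tl, uint8_t tr,
 *                                      uint8_t bl, uint8_t br,
 *                                      uint32_t wt, uint32_t wb, uint32_t x)
 *     {
 *         // horizontal weight: top BITS bits of the fractional part
 *         uint32_t dist = (x >> (16 - BITS)) & (SIZE - 1);
 *         uint32_t l = tl * wt + bl * wb;    // vmull.u8 + vmlal.u8
 *         uint32_t r = tr * wt + br * wb;
 *         // vshll.u16 + vmlsl.u16 + vmlal.u16, then vshrn/vmovn
 *         return (uint8_t) ((l * (SIZE - dist) + r * dist) >> (2 * BITS));
 *     }
 */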

.macro bilinear_load_8888 reg1, reg2, tmp
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    vld1.32   {reg1}, [TMP1], STRIDE
    vld1.32   {reg2}, [TMP1]
.endm

.macro bilinear_load_0565 reg1, reg2, tmp
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #1
    vld1.32   {reg2[0]}, [TMP1], STRIDE
    vld1.32   {reg2[1]}, [TMP1]
    convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm

.macro bilinear_load_and_vertical_interpolate_two_8888 \
           acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2

    bilinear_load_8888 reg1, reg2, tmp1
    vmull.u8  acc1, reg1, d28
    vmlal.u8  acc1, reg2, d29
    bilinear_load_8888 reg3, reg4, tmp2
    vmull.u8  acc2, reg3, d28
    vmlal.u8  acc2, reg4, d29
.endm

.macro bilinear_load_and_vertical_interpolate_four_8888 \
           xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
           yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi

    bilinear_load_and_vertical_interpolate_two_8888 \
               xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
    bilinear_load_and_vertical_interpolate_two_8888 \
               yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
.endm

.macro bilinear_load_and_vertical_interpolate_two_0565 \
           acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi

    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #1
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #1
    vld1.32   {acc2lo[0]}, [TMP1], STRIDE
    vld1.32   {acc2hi[0]}, [TMP2], STRIDE
    vld1.32   {acc2lo[1]}, [TMP1]
    vld1.32   {acc2hi[1]}, [TMP2]
    convert_0565_to_x888 acc2, reg3, reg2, reg1
    vzip.u8   reg1, reg3
    vzip.u8   reg2, reg4
    vzip.u8   reg3, reg4
    vzip.u8   reg1, reg2
    vmull.u8  acc1, reg1, d28
    vmlal.u8  acc1, reg2, d29
    vmull.u8  acc2, reg3, d28
    vmlal.u8  acc2, reg4, d29
.endm

.macro bilinear_load_and_vertical_interpolate_four_0565 \
           xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
           yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi

    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #1
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #1
    vld1.32   {xacc2lo[0]}, [TMP1], STRIDE
    vld1.32   {xacc2hi[0]}, [TMP2], STRIDE
    vld1.32   {xacc2lo[1]}, [TMP1]
    vld1.32   {xacc2hi[1]}, [TMP2]
    convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #1
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #1
    vld1.32   {yacc2lo[0]}, [TMP1], STRIDE
    vzip.u8   xreg1, xreg3
    vld1.32   {yacc2hi[0]}, [TMP2], STRIDE
    vzip.u8   xreg2, xreg4
    vld1.32   {yacc2lo[1]}, [TMP1]
    vzip.u8   xreg3, xreg4
    vld1.32   {yacc2hi[1]}, [TMP2]
    vzip.u8   xreg1, xreg2
    convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
    vmull.u8  xacc1, xreg1, d28
    vzip.u8   yreg1, yreg3
    vmlal.u8  xacc1, xreg2, d29
    vzip.u8   yreg2, yreg4
    vmull.u8  xacc2, xreg3, d28
    vzip.u8   yreg3, yreg4
    vmlal.u8  xacc2, xreg4, d29
    vzip.u8   yreg1, yreg2
    vmull.u8  yacc1, yreg1, d28
    vmlal.u8  yacc1, yreg2, d29
    vmull.u8  yacc2, yreg3, d28
    vmlal.u8  yacc2, yreg4, d29
.endm

.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
    vst1.32   {d0, d1}, [OUT, :128]!
.elseif numpix == 2
    vst1.32   {d0}, [OUT, :64]!
.elseif numpix == 1
    vst1.32   {d0[0]}, [OUT, :32]!
.else
    .error bilinear_store_8888 numpix is unsupported
.endif
.endm

.macro bilinear_store_0565 numpix, tmp1, tmp2
    vuzp.u8   d0, d1
    vuzp.u8   d2, d3
    vuzp.u8   d1, d3
    vuzp.u8   d0, d2
    convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2
.if numpix == 4
    vst1.16   {d2}, [OUT, :64]!
.elseif numpix == 2
    vst1.32   {d2[0]}, [OUT, :32]!
.elseif numpix == 1
    vst1.16   {d2[0]}, [OUT, :16]!
.else
    .error bilinear_store_0565 numpix is unsupported
.endif
.endm
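
/*
 * The 0565 store path packs a8r8g8b8 results down to r5g6b5. A minimal C
 * sketch of that packing (the real converters are the convert_* macros in
 * pixman-arm-neon-asm.h):
 *
 *     static inline uint16_t pack_0565 (uint8_t r, uint8_t g, uint8_t b)
 *     {
 *         return (uint16_t) (((r & 0xf8) << 8) |   // bits 15..11
 *                            ((g & 0xfc) << 3) |   // bits 10..5
 *                            ( b         >> 3));   // bits  4..0
 *     }
 */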

.macro bilinear_interpolate_last_pixel src_fmt, dst_fmt
    bilinear_load_&src_fmt d0, d1, d2
    vmull.u8  q1, d0, d28
    vmlal.u8  q1, d1, d29
    /* 5 cycles bubble */
    vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d2, d30
    vmlal.u16 q0, d3, d30
    /* 5 cycles bubble */
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    /* 3 cycles bubble */
    vmovn.u16 d0, q0
    /* 1 cycle bubble */
    bilinear_store_&dst_fmt 1, q2, q3
.endm

.macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
    bilinear_load_and_vertical_interpolate_two_&src_fmt \
               q1, q11, d0, d1, d20, d21, d22, d23
    vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d2, d30
    vmlal.u16 q0, d3, d30
    vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q10, d22, d31
    vmlal.u16 q10, d23, d31
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13
    vmovn.u16 d0, q0
    bilinear_store_&dst_fmt 2, q2, q3
.endm

.macro bilinear_interpolate_four_pixels src_fmt, dst_fmt
    bilinear_load_and_vertical_interpolate_four_&src_fmt \
               q1, q11, d0, d1, d20, d21, d22, d23 \
               q3, q9,  d4, d5, d16, d17, d18, d19
    pld       [TMP1, PF_OFFS]
    sub       TMP1, TMP1, STRIDE
    vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d2, d30
    vmlal.u16 q0, d3, d30
    vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q10, d22, d31
    vmlal.u16 q10, d23, d31
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d6, d30
    vmlal.u16 q2, d7, d30
    vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS
    pld       [TMP2, PF_OFFS]
    vmlsl.u16 q8, d18, d31
    vmlal.u16 q8, d19, d31
    vadd.u16  q12, q12, q13
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d0, q0
    vmovn.u16 d1, q2
    vadd.u16  q12, q12, q13
    bilinear_store_&dst_fmt 4, q2, q3
.endm

.macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
    bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head
.else
    bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm

.macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
    bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail
.endif
.endm

.macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
    bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
    bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm

.macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
    bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head
.else
    bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm

.macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
    bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail
.else
    bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.endif
.endm

.macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
    bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
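
/*
 * The head/tail/tail_head split above enables software pipelining, just like
 * in the compositing templates. In C-like terms, assuming nblocks >= 1 and
 * head/tail/tail_head standing for the macros above:
 *
 *     head (0);                        // start the first block
 *     for (i = 1; i < nblocks; i++)
 *         tail_head (i);               // finish block i-1, start block i
 *     tail (nblocks - 1);              // finish the last block
 *
 * so the memory accesses of one block can overlap with the arithmetic of the
 * previous one.
 */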

.set BILINEAR_FLAG_UNROLL_4,          0
.set BILINEAR_FLAG_UNROLL_8,          1
.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2

/*
 * Main template macro for generating NEON optimized bilinear scanline
 * functions.
 *
 * Bilinear scanline scaler macro template uses the following arguments:
 *  fname             - name of the function to generate
 *  src_fmt           - source color format (8888 or 0565)
 *  dst_fmt           - destination color format (8888 or 0565)
 *  src_bpp_shift     - (1 << src_bpp_shift) is the size of a source pixel
 *                      in bytes
 *  dst_bpp_shift     - (1 << dst_bpp_shift) is the size of a destination
 *                      pixel in bytes
 *  prefetch_distance - prefetch in the source image by that many
 *                      pixels ahead
 *  flags             - a bitwise OR of BILINEAR_FLAG_* values selecting the
 *                      unroll factor and the use of the d8-d15 registers
 */

.macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \
                                       src_bpp_shift, dst_bpp_shift, \
                                       prefetch_distance, flags

pixman_asm_function fname
    OUT       .req    r0
    TOP       .req    r1
    BOTTOM    .req    r2
    WT        .req    r3
    WB        .req    r4
    X         .req    r5
    UX        .req    r6
    WIDTH     .req    ip
    TMP1      .req    r3
    TMP2      .req    r4
    PF_OFFS   .req    r7
    TMP3      .req    r8
    TMP4      .req    r9
    STRIDE    .req    r2

    .fnstart
    mov       ip, sp
    .save     {r4, r5, r6, r7, r8, r9}
    push      {r4, r5, r6, r7, r8, r9}
    mov       PF_OFFS, #prefetch_distance
    ldmia     ip, {WB, X, UX, WIDTH}
    mul       PF_OFFS, PF_OFFS, UX

.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
    .vsave    {d8-d15}
    vpush     {d8-d15}
.endif

    sub       STRIDE, BOTTOM, TOP
    .unreq    BOTTOM

    cmp       WIDTH, #0
    ble       3f

    vdup.u16  q12, X
    vdup.u16  q13, UX
    vdup.u8   d28, WT
    vdup.u8   d29, WB
    vadd.u16  d25, d25, d26

    /* ensure good destination alignment */
    cmp       WIDTH, #1
    blt       0f
    tst       OUT, #(1 << dst_bpp_shift)
    beq       0f
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13
    bilinear_interpolate_last_pixel src_fmt, dst_fmt
    sub       WIDTH, WIDTH, #1
0:
    vadd.u16  q13, q13, q13
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13

    cmp       WIDTH, #2
    blt       0f
    tst       OUT, #(1 << (dst_bpp_shift + 1))
    beq       0f
    bilinear_interpolate_two_pixels src_fmt, dst_fmt
    sub       WIDTH, WIDTH, #2
0:
.if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0
/*********** 8 pixels per iteration *****************/
    cmp       WIDTH, #4
    blt       0f
    tst       OUT, #(1 << (dst_bpp_shift + 2))
    beq       0f
    bilinear_interpolate_four_pixels src_fmt, dst_fmt
    sub       WIDTH, WIDTH, #4
0:
    subs      WIDTH, WIDTH, #8
    blt       1f
    mov       PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
    bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
    subs      WIDTH, WIDTH, #8
    blt       5f
0:
    bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
    subs      WIDTH, WIDTH, #8
    bge       0b
5:
    bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
1:
    tst       WIDTH, #4
    beq       2f
    bilinear_interpolate_four_pixels src_fmt, dst_fmt
2:
.else
/*********** 4 pixels per iteration *****************/
    subs      WIDTH, WIDTH, #4
    blt       1f
    mov       PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
    bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
    subs      WIDTH, WIDTH, #4
    blt       5f
0:
    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
    subs      WIDTH, WIDTH, #4
    bge       0b
5:
    bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
1:
/****************************************************/
.endif
    /* handle the remaining trailing pixels */
    tst       WIDTH, #2
    beq       2f
    bilinear_interpolate_two_pixels src_fmt, dst_fmt
2:
    tst       WIDTH, #1
    beq       3f
    bilinear_interpolate_last_pixel src_fmt, dst_fmt
3:
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
    vpop      {d8-d15}
.endif
    pop       {r4, r5, r6, r7, r8, r9}
    bx        lr
    .fnend

    .unreq    OUT
    .unreq    TOP
    .unreq    WT
    .unreq    WB
    .unreq    X
    .unreq    UX
    .unreq    WIDTH
    .unreq    TMP1
    .unreq    TMP2
    .unreq    PF_OFFS
    .unreq    TMP3
    .unreq    TMP4
    .unreq    STRIDE
.endfunc

.endm
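
/*
 * A condensed C-like sketch of the control flow generated above (assuming
 * the 4-pixels-per-iteration variant; interp_N stands for the
 * bilinear_interpolate_* macros):
 *
 *     if (width >= 1 && (out & pixel_size))        // align to 2 pixels
 *         interp_1 (), width -= 1;
 *     if (width >= 2 && (out & (2 * pixel_size)))  // align to 4 pixels
 *         interp_2 (), width -= 2;
 *     if ((width -= 4) >= 0)
 *     {
 *         head of the first 4-pixel block;
 *         while ((width -= 4) >= 0)
 *             tail_head;                           // pipelined main loop
 *         tail of the last block;
 *     }
 *     if (width & 2) interp_2 ();                  // trailing pixels
 *     if (width & 1) interp_1 ();
 *
 * PF_OFFS is preset to prefetch_distance * UX >> (16 - src_bpp_shift), i.e.
 * the source prefetch offset in bytes corresponding to a given distance in
 * destination pixels.
 */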

/*****************************************************************************/

.set have_bilinear_interpolate_four_pixels_8888_8888, 1

.macro bilinear_interpolate_four_pixels_8888_8888_head
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #2

    vld1.32   {d22}, [TMP1], STRIDE
    vld1.32   {d23}, [TMP1]
    mov       TMP3, X, asr #16
    add       X, X, UX
    add       TMP3, TOP, TMP3, asl #2
    vmull.u8  q8, d22, d28
    vmlal.u8  q8, d23, d29

    vld1.32   {d22}, [TMP2], STRIDE
    vld1.32   {d23}, [TMP2]
    mov       TMP4, X, asr #16
    add       X, X, UX
    add       TMP4, TOP, TMP4, asl #2
    vmull.u8  q9, d22, d28
    vmlal.u8  q9, d23, d29

    vld1.32   {d22}, [TMP3], STRIDE
    vld1.32   {d23}, [TMP3]
    vmull.u8  q10, d22, d28
    vmlal.u8  q10, d23, d29

    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30

    pld       [TMP4, PF_OFFS]
    vld1.32   {d16}, [TMP4], STRIDE
    vld1.32   {d17}, [TMP4]
    pld       [TMP4, PF_OFFS]
    vmull.u8  q11, d16, d28
    vmlal.u8  q11, d17, d29

    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q1, d18, d31
.endm

.macro bilinear_interpolate_four_pixels_8888_8888_tail
    vmlal.u16 q1, d19, d31
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vadd.u16  q12, q12, q13
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d6, q0
    vmovn.u16 d7, q2
    vadd.u16  q12, q12, q13
    vst1.32   {d6, d7}, [OUT, :128]!
.endm

.macro bilinear_interpolate_four_pixels_8888_8888_tail_head
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #2
    vmlal.u16 q1, d19, d31
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vld1.32   {d20}, [TMP1], STRIDE
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vld1.32   {d21}, [TMP1]
    vmull.u8  q8, d20, d28
    vmlal.u8  q8, d21, d29
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d22}, [TMP2], STRIDE
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13
    vld1.32   {d23}, [TMP2]
    vmull.u8  q9, d22, d28
    mov       TMP3, X, asr #16
    add       X, X, UX
    add       TMP3, TOP, TMP3, asl #2
    mov       TMP4, X, asr #16
    add       X, X, UX
    add       TMP4, TOP, TMP4, asl #2
    vmlal.u8  q9, d23, d29
    vld1.32   {d22}, [TMP3], STRIDE
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d23}, [TMP3]
    vmull.u8  q10, d22, d28
    vmlal.u8  q10, d23, d29
    vmovn.u16 d6, q0
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmovn.u16 d7, q2
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30
    pld       [TMP4, PF_OFFS]
    vld1.32   {d16}, [TMP4], STRIDE
    vadd.u16  q12, q12, q13
    vld1.32   {d17}, [TMP4]
    pld       [TMP4, PF_OFFS]
    vmull.u8  q11, d16, d28
    vmlal.u8  q11, d17, d29
    vst1.32   {d6, d7}, [OUT, :128]!
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q1, d18, d31
.endm

/*****************************************************************************/

.set have_bilinear_interpolate_eight_pixels_8888_0565, 1

.macro bilinear_interpolate_eight_pixels_8888_0565_head
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #2
    vld1.32   {d20}, [TMP1], STRIDE
    vld1.32   {d21}, [TMP1]
    vmull.u8  q8, d20, d28
    vmlal.u8  q8, d21, d29
    vld1.32   {d22}, [TMP2], STRIDE
    vld1.32   {d23}, [TMP2]
    vmull.u8  q9, d22, d28
    mov       TMP3, X, asr #16
    add       X, X, UX
    add       TMP3, TOP, TMP3, asl #2
    mov       TMP4, X, asr #16
    add       X, X, UX
    add       TMP4, TOP, TMP4, asl #2
    vmlal.u8  q9, d23, d29
    vld1.32   {d22}, [TMP3], STRIDE
    vld1.32   {d23}, [TMP3]
    vmull.u8  q10, d22, d28
    vmlal.u8  q10, d23, d29
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30
    pld       [TMP4, PF_OFFS]
    vld1.32   {d16}, [TMP4], STRIDE
    vld1.32   {d17}, [TMP4]
    pld       [TMP4, PF_OFFS]
    vmull.u8  q11, d16, d28
    vmlal.u8  q11, d17, d29
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q1, d18, d31

    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #2
    vmlal.u16 q1, d19, d31
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vld1.32   {d20}, [TMP1], STRIDE
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vld1.32   {d21}, [TMP1]
    vmull.u8  q8, d20, d28
    vmlal.u8  q8, d21, d29
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d22}, [TMP2], STRIDE
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13
    vld1.32   {d23}, [TMP2]
    vmull.u8  q9, d22, d28
    mov       TMP3, X, asr #16
    add       X, X, UX
    add       TMP3, TOP, TMP3, asl #2
    mov       TMP4, X, asr #16
    add       X, X, UX
    add       TMP4, TOP, TMP4, asl #2
    vmlal.u8  q9, d23, d29
    vld1.32   {d22}, [TMP3], STRIDE
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d23}, [TMP3]
    vmull.u8  q10, d22, d28
    vmlal.u8  q10, d23, d29
    vmovn.u16 d8, q0
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmovn.u16 d9, q2
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30
    pld       [TMP4, PF_OFFS]
    vld1.32   {d16}, [TMP4], STRIDE
    vadd.u16  q12, q12, q13
    vld1.32   {d17}, [TMP4]
    pld       [TMP4, PF_OFFS]
    vmull.u8  q11, d16, d28
    vmlal.u8  q11, d17, d29
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q1, d18, d31
.endm

.macro bilinear_interpolate_eight_pixels_8888_0565_tail
    vmlal.u16 q1, d19, d31
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vadd.u16  q12, q12, q13
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vmovn.u16 d10, q0
    vmovn.u16 d11, q2
    vadd.u16  q12, q12, q13

    vuzp.u8   d8, d9
    vuzp.u8   d10, d11
    vuzp.u8   d9, d11
    vuzp.u8   d8, d10
    vshll.u8  q6, d9, #8
    vshll.u8  q5, d10, #8
    vshll.u8  q7, d8, #8
    vsri.u16  q5, q6, #5
    vsri.u16  q5, q7, #11
    vst1.32   {d10, d11}, [OUT, :128]!
.endm

.macro bilinear_interpolate_eight_pixels_8888_0565_tail_head
    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #2
    vmlal.u16 q1, d19, d31
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vuzp.u8   d8, d9
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vld1.32   {d20}, [TMP1], STRIDE
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vld1.32   {d21}, [TMP1]
    vmull.u8  q8, d20, d28
    vmlal.u8  q8, d21, d29
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d22}, [TMP2], STRIDE
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13
    vld1.32   {d23}, [TMP2]
    vmull.u8  q9, d22, d28
    mov       TMP3, X, asr #16
    add       X, X, UX
    add       TMP3, TOP, TMP3, asl #2
    mov       TMP4, X, asr #16
    add       X, X, UX
    add       TMP4, TOP, TMP4, asl #2
    vmlal.u8  q9, d23, d29
    vld1.32   {d22}, [TMP3], STRIDE
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d23}, [TMP3]
    vmull.u8  q10, d22, d28
    vmlal.u8  q10, d23, d29
    vmovn.u16 d10, q0
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmovn.u16 d11, q2
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30
    pld       [TMP4, PF_OFFS]
    vld1.32   {d16}, [TMP4], STRIDE
    vadd.u16  q12, q12, q13
    vld1.32   {d17}, [TMP4]
    pld       [TMP4, PF_OFFS]
    vmull.u8  q11, d16, d28
    vmlal.u8  q11, d17, d29
    vuzp.u8   d10, d11
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vmlsl.u16 q1, d18, d31

    mov       TMP1, X, asr #16
    add       X, X, UX
    add       TMP1, TOP, TMP1, asl #2
    mov       TMP2, X, asr #16
    add       X, X, UX
    add       TMP2, TOP, TMP2, asl #2
    vmlal.u16 q1, d19, d31
    vuzp.u8   d9, d11
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
    vuzp.u8   d8, d10
    vmlsl.u16 q2, d20, d30
    vmlal.u16 q2, d21, d30
    vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
    vld1.32   {d20}, [TMP1], STRIDE
    vmlsl.u16 q3, d22, d31
    vmlal.u16 q3, d23, d31
    vld1.32   {d21}, [TMP1]
    vmull.u8  q8, d20, d28
    vmlal.u8  q8, d21, d29
    vshll.u8  q6, d9, #8
    vshll.u8  q5, d10, #8
    vshll.u8  q7, d8, #8
    vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
    vsri.u16  q5, q6, #5
    vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
    vsri.u16  q5, q7, #11
    vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d22}, [TMP2], STRIDE
    vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
    vadd.u16  q12, q12, q13
    vld1.32   {d23}, [TMP2]
    vmull.u8  q9, d22, d28
    mov       TMP3, X, asr #16
    add       X, X, UX
    add       TMP3, TOP, TMP3, asl #2
    mov       TMP4, X, asr #16
    add       X, X, UX
    add       TMP4, TOP, TMP4, asl #2
    vmlal.u8  q9, d23, d29
    vld1.32   {d22}, [TMP3], STRIDE
    vshr.u16  q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
    vld1.32   {d23}, [TMP3]
    vmull.u8  q10, d22, d28
    vmlal.u8  q10, d23, d29
    vmovn.u16 d8, q0
    vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
    vmovn.u16 d9, q2
    vmlsl.u16 q0, d16, d30
    vmlal.u16 q0, d17, d30
    pld       [TMP4, PF_OFFS]
    vld1.32   {d16}, [TMP4], STRIDE
    vadd.u16  q12, q12, q13
    vld1.32   {d17}, [TMP4]
    pld       [TMP4, PF_OFFS]
    vmull.u8  q11, d16, d28
    vmlal.u8  q11, d17, d29
    vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
    vst1.32   {d10, d11}, [OUT, :128]!
    vmlsl.u16 q1, d18, d31
.endm

/*****************************************************************************/

generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \
    2, 2, 28, BILINEAR_FLAG_UNROLL_4

generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \
    2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS

generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \
    1, 2, 28, BILINEAR_FLAG_UNROLL_4

generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \
    1, 1, 28, BILINEAR_FLAG_UNROLL_4
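
/*
 * Judging from the argument registers and stack layout set up in
 * generate_bilinear_scanline_func (OUT=r0, TOP=r1, BOTTOM=r2, WT=r3, then
 * WB, X, UX and WIDTH on the stack), the functions instantiated above can
 * be modelled by a C prototype along these lines (a sketch; the
 * authoritative declarations are on the C side of pixman):
 *
 *     void pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon (
 *         uint32_t       *out,     // destination scanline
 *         const uint32_t *top,     // two adjacent source scanlines
 *         const uint32_t *bottom,
 *         int             wt,      // vertical weights
 *         int             wb,
 *         pixman_fixed_t  x,       // 16.16 source position and step
 *         pixman_fixed_t  ux,
 *         int             width);
 */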