/*
 * Copyright © 2009 Nokia Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author:  Siarhei Siamashka (siarhei.siamashka@nokia.com)
 */

/*
 * This file contains a macro ('generate_composite_function') which can
 * construct 2D image processing functions, based on a common template.
 * Any combination of source, destination and mask images with 8bpp,
 * 16bpp, 24bpp or 32bpp color formats is supported.
 *
 * This macro takes care of:
 *  - handling of leading and trailing unaligned pixels
 *  - doing most of the work related to L2 cache preload
 *  - encouraging the use of software pipelining for better instruction
 *    scheduling
 *
 * The user of this macro has to provide some configuration parameters
 * (bit depths for the images, prefetch distance, etc.) and a set of
 * macros which implement the basic code chunks responsible for pixel
 * processing. See the 'pixman-arm-neon-asm.S' file for usage examples.
 *
 * TODO:
 *  - try overlapped pixel method (from Ian Rickards) when processing
 *    exactly two blocks of pixels
 *  - maybe add an option to do reverse scanline processing
 */

/*
 * Bit flags for the 'generate_composite_function' macro which are used
 * to tune the behavior of the generated functions.
 */
.set FLAG_DST_WRITEONLY,      0
.set FLAG_DST_READWRITE,      1
.set FLAG_DEINTERLEAVE_32BPP, 2

/*
 * Offset in the stack where the mask and source pointer/stride can be
 * accessed from the 'init' macro. This is useful for doing special
 * handling for a solid mask.
 */
.set ARGS_STACK_OFFSET,       40

/*
 * Constants for selecting the preferable prefetch type.
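 *
 * PREFETCH_TYPE_DEFAULT is expected to be provided by the file which
 * includes this template (pixman-arm-neon-asm.S does this, typically
 * selecting PREFETCH_TYPE_ADVANCED); 'generate_composite_function' then
 * derives PREFETCH_TYPE_CURRENT from it, falling back to SIMPLE or NONE
 * depending on the color formats and the 'prefetch_distance' argument.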
 */
.set PREFETCH_TYPE_NONE,     0 /* No prefetch at all */
.set PREFETCH_TYPE_SIMPLE,   1 /* A simple, fixed-distance-ahead prefetch */
.set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */

/*
 * Definitions of supplementary pixld/pixst macros (for partial load/store of
 * pixel data).
 */

.macro pixldst1 op, elem_size, reg1, mem_operand, abits
.if abits > 0
    op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
.else
    op&.&elem_size {d&reg1}, [&mem_operand&]!
.endif
.endm

.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
.if abits > 0
    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
.else
    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
.endif
.endm

.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
.if abits > 0
    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
.else
    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
.endif
.endm

.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
    op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
.endm

.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
    op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
.endm

.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
    op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
.endm

.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
.if numbytes == 32
    pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
                            %(basereg+6), %(basereg+7), mem_operand, abits
.elseif numbytes == 16
    pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
.elseif numbytes == 8
    pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
.elseif numbytes == 4
    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
        pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
    .elseif elem_size == 16
        pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
        pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
    .else
        pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
        pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
        pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
        pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
    .endif
.elseif numbytes == 2
    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
        pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
    .else
        pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
        pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
    .endif
.elseif numbytes == 1
    pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
.else
    .error "unsupported size: numbytes"
.endif
.endm

.macro pixld numpix, bpp, basereg, mem_operand, abits=0
.if bpp > 0
.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
                      %(basereg+6), %(basereg+7), mem_operand, abits
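    /*
     * Note: for eight 32bpp pixels in deinterleave mode, the vld4.8 above
     * splits the interleaved B, G, R, A bytes into four separate
     * d registers (one color plane per register).
     */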
.elseif (bpp == 24) && (numpix == 8)
    pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
.elseif (bpp == 24) && (numpix == 4)
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
.elseif (bpp == 24) && (numpix == 2)
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
.elseif (bpp == 24) && (numpix == 1)
    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
.else
    pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
.endif
.endif
.endm

.macro pixst numpix, bpp, basereg, mem_operand, abits=0
.if bpp > 0
.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
                      %(basereg+6), %(basereg+7), mem_operand, abits
.elseif (bpp == 24) && (numpix == 8)
    pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
.elseif (bpp == 24) && (numpix == 4)
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
.elseif (bpp == 24) && (numpix == 2)
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
.elseif (bpp == 24) && (numpix == 1)
    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
.else
    pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
.endif
.endif
.endm

.macro pixld_a numpix, bpp, basereg, mem_operand
.if (bpp * numpix) <= 128
    pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
.else
    pixld numpix, bpp, basereg, mem_operand, 128
.endif
.endm

.macro pixst_a numpix, bpp, basereg, mem_operand
.if (bpp * numpix) <= 128
    pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
.else
    pixst numpix, bpp, basereg, mem_operand, 128
.endif
.endm

/*
 * Pixel fetcher for nearest scaling (needs TMP1, TMP2, VX, UNIT_X register
 * aliases to be defined)
 */
.macro pixld1_s elem_size, reg1, mem_operand
.if elem_size == 16
    mov     TMP1, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP1, mem_operand, TMP1, asl #1
    mov     TMP2, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP2, mem_operand, TMP2, asl #1
    vld1.16 {d&reg1&[0]}, [TMP1, :16]
    mov     TMP1, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP1, mem_operand, TMP1, asl #1
    vld1.16 {d&reg1&[1]}, [TMP2, :16]
    mov     TMP2, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP2, mem_operand, TMP2, asl #1
    vld1.16 {d&reg1&[2]}, [TMP1, :16]
    vld1.16 {d&reg1&[3]}, [TMP2, :16]
.elseif elem_size == 32
    mov     TMP1, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP1, mem_operand, TMP1, asl #2
    mov     TMP2, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP2, mem_operand, TMP2, asl #2
    vld1.32 {d&reg1&[0]}, [TMP1, :32]
    vld1.32 {d&reg1&[1]}, [TMP2, :32]
.else
    .error "unsupported"
.endif
.endm

.macro pixld2_s elem_size, reg1, reg2, mem_operand
.if 0 /* elem_size == 32 */
    mov     TMP1, VX, asr #16
    add     VX, VX, UNIT_X, asl #1
    add     TMP1, mem_operand, TMP1, asl #2
    mov     TMP2, VX, asr #16
    sub     VX, VX, UNIT_X
    add     TMP2, mem_operand, TMP2, asl #2
    vld1.32 {d&reg1&[0]}, [TMP1, :32]
    mov     TMP1, VX, asr #16
    add     VX, VX, UNIT_X, asl #1
    add     TMP1, mem_operand, TMP1, asl #2
    vld1.32 {d&reg2&[0]}, [TMP2, :32]
    mov     TMP2, VX, asr #16
    add     VX, VX, UNIT_X
    add     TMP2, mem_operand, TMP2, asl #2
    vld1.32 {d&reg1&[1]}, [TMP1, :32]
    vld1.32 {d&reg2&[1]}, [TMP2, :32]
.else
    pixld1_s elem_size, reg1, mem_operand
    pixld1_s elem_size, reg2, mem_operand
.endif
.endm

.macro pixld0_s elem_size, reg1, idx, mem_operand
.if elem_size == 16
    mov     TMP1, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP1, mem_operand, TMP1, asl #1
    vld1.16 {d&reg1&[idx]}, [TMP1, :16]
.elseif elem_size == 32
    mov     TMP1, VX, asr #16
    adds    VX, VX, UNIT_X
5:  subpls  VX, VX, SRC_WIDTH_FIXED
    bpl     5b
    add     TMP1, mem_operand, TMP1, asl #2
    vld1.32 {d&reg1&[idx]}, [TMP1, :32]
.endif
.endm

.macro pixld_s_internal numbytes, elem_size, basereg, mem_operand
.if numbytes == 32
    pixld2_s elem_size, %(basereg+4), %(basereg+5), mem_operand
    pixld2_s elem_size, %(basereg+6), %(basereg+7), mem_operand
    pixdeinterleave elem_size, %(basereg+4)
.elseif numbytes == 16
    pixld2_s elem_size, %(basereg+2), %(basereg+3), mem_operand
.elseif numbytes == 8
    pixld1_s elem_size, %(basereg+1), mem_operand
.elseif numbytes == 4
    .if elem_size == 32
        pixld0_s elem_size, %(basereg+0), 1, mem_operand
    .elseif elem_size == 16
        pixld0_s elem_size, %(basereg+0), 2, mem_operand
        pixld0_s elem_size, %(basereg+0), 3, mem_operand
    .else
        pixld0_s elem_size, %(basereg+0), 4, mem_operand
        pixld0_s elem_size, %(basereg+0), 5, mem_operand
        pixld0_s elem_size, %(basereg+0), 6, mem_operand
        pixld0_s elem_size, %(basereg+0), 7, mem_operand
    .endif
.elseif numbytes == 2
    .if elem_size == 16
        pixld0_s elem_size, %(basereg+0), 1, mem_operand
    .else
        pixld0_s elem_size, %(basereg+0), 2, mem_operand
        pixld0_s elem_size, %(basereg+0), 3, mem_operand
    .endif
.elseif numbytes == 1
    pixld0_s elem_size, %(basereg+0), 1, mem_operand
.else
    .error "unsupported size: numbytes"
.endif
.endm

.macro pixld_s numpix, bpp, basereg, mem_operand
.if bpp > 0
    pixld_s_internal %(numpix * bpp / 8), %(bpp), basereg, mem_operand
.endif
.endm

.macro vuzp8 reg1, reg2
    vuzp.8 d&reg1, d&reg2
.endm

.macro vzip8 reg1, reg2
    vzip.8 d&reg1, d&reg2
.endm

/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
.macro pixdeinterleave bpp, basereg
.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    vuzp8 %(basereg+0), %(basereg+1)
    vuzp8 %(basereg+2), %(basereg+3)
    vuzp8 %(basereg+1), %(basereg+3)
    vuzp8 %(basereg+0), %(basereg+2)
.endif
.endm

/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
.macro pixinterleave bpp, basereg
.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    vzip8 %(basereg+0), %(basereg+2)
    vzip8 %(basereg+1), %(basereg+3)
    vzip8 %(basereg+2), %(basereg+3)
    vzip8 %(basereg+0), %(basereg+1)
.endif
.endm

/*
 * This is a macro for implementing cache preload. The main idea is that
 * the cache preload logic is mostly independent of the rest of the pixel
 * processing code. It starts at the top left pixel and moves forward
 * across pixels and can jump across scanlines. The prefetch distance is
 * handled in an 'incremental' way: it starts from 0 and advances to the
 * optimal distance over time. After reaching the optimal prefetch distance,
 * it is kept constant. There are some checks which prevent prefetching
 * unneeded pixel lines below the image (but it still can prefetch a bit
 * more data on the right side of the image - not a big issue and it may
 * actually be helpful when rendering text glyphs). An additional trick is
 * the use of an LDR instruction for prefetch instead of PLD when moving to
 * the next line: there is a high chance of getting a TLB miss in this
 * case, and PLD would be useless.
 *
 * This sounds like it may introduce a noticeable overhead (when working with
 * fully cached data). But in reality, due to the separate pipeline and
 * instruction queue for the NEON unit in the ARM Cortex-A8, normal ARM code
 * can execute simultaneously with NEON code and be completely shadowed by
 * it. Thus we get no performance overhead at all (*). This looks like a
 * very nice feature of the Cortex-A8, if used wisely.
 * We don't have a hardware prefetcher, but we can still implement some
 * rather advanced prefetch logic in software for almost zero cost!
 *
 * (*) The overhead of the prefetcher is visible when running some trivial
 *     pixel processing like a simple copy. Anyway, having prefetch is a
 *     must when working with graphics data.
 */
.macro PF a, x:vararg
.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
    a x
.endif
.endm

.macro cache_preload std_increment, boost_increment
.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
.if regs_shortage
    PF ldr      ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
.endif
.if std_increment != 0
    PF add      PF_X, PF_X, #std_increment
.endif
    PF tst      PF_CTL, #0xF
    PF addne    PF_X, PF_X, #boost_increment
    PF subne    PF_CTL, PF_CTL, #1
    PF cmp      PF_X, ORIG_W
.if src_bpp_shift >= 0
    PF pld,     [PF_SRC, PF_X, lsl #src_bpp_shift]
.endif
.if dst_r_bpp != 0
    PF pld,     [PF_DST, PF_X, lsl #dst_bpp_shift]
.endif
.if mask_bpp_shift >= 0
    PF pld,     [PF_MASK, PF_X, lsl #mask_bpp_shift]
.endif
    PF subge    PF_X, PF_X, ORIG_W
    PF subges   PF_CTL, PF_CTL, #0x10
.if src_bpp_shift >= 0
    PF ldrgeb   DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
.endif
.if dst_r_bpp != 0
    PF ldrgeb   DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
.endif
.if mask_bpp_shift >= 0
    PF ldrgeb   DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
.endif
.endif
.endm

.macro cache_preload_simple
.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
.if src_bpp > 0
    pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
.endif
.if dst_r_bpp > 0
    pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
.endif
.if mask_bpp > 0
    pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
.endif
.endif
.endm

.macro fetch_mask_pixblock
    pixld       pixblock_size, mask_bpp, \
                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
.endm

/*
 * Macro which is used to process leading pixels until the destination
 * pointer is properly aligned (at a 16 byte boundary). When the destination
 * buffer uses 16bpp format, this is unnecessary, or even pointless.
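 *
 * For example, with a 32bpp destination the '.irp' block below peels off
 * one leading pixel when the destination address has the '4' bit set and
 * two more when it has the '8' bit set, after which the main loop can use
 * 16-byte aligned loads and stores (pixld_a/pixst_a).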
 */
.macro ensure_destination_ptr_alignment process_pixblock_head, \
                                        process_pixblock_tail, \
                                        process_pixblock_tail_head
.if dst_w_bpp != 24
    tst         DST_R, #0xF
    beq         2f

.irp lowbit, 1, 2, 4, 8, 16
local skip1
.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
.if lowbit < 16 /* we don't need more than 16-byte alignment */
    tst         DST_R, #lowbit
    beq         1f
.endif
    pixld_src   (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
    pixld       (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
.if dst_r_bpp > 0
    pixld_a     (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
.else
    add         DST_R, DST_R, #lowbit
.endif
    PF add      PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
    sub         W, W, #(lowbit * 8 / dst_w_bpp)
1:
.endif
.endr
    pixdeinterleave src_bpp, src_basereg
    pixdeinterleave mask_bpp, mask_basereg
    pixdeinterleave dst_r_bpp, dst_r_basereg

    process_pixblock_head
    cache_preload 0, pixblock_size
    cache_preload_simple
    process_pixblock_tail

    pixinterleave dst_w_bpp, dst_w_basereg
.irp lowbit, 1, 2, 4, 8, 16
.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
.if lowbit < 16 /* we don't need more than 16-byte alignment */
    tst         DST_W, #lowbit
    beq         1f
.endif
    pixst_a     (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
1:
.endif
.endr
.endif
2:
.endm

/*
 * Special code for processing up to (pixblock_size - 1) remaining
 * trailing pixels. As SIMD processing operates on pixblock_size pixels
 * at a time, anything smaller than this has to be loaded and stored in
 * a special way. Loading and storing of pixel data is performed in such
 * a way that we fill some 'slots' in the NEON registers (some slots
 * naturally are unused), then perform the compositing operation as usual.
 * In the end, the data is taken from these 'slots' and saved to memory.
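 *
 * For example, with a pixblock_size of 8 and 5 trailing pixels, the
 * chunk_size loop below loads a chunk of 4 pixels and then a chunk of
 * 1 pixel into the register 'slots', runs head/tail processing once,
 * and stores the same 4 + 1 chunks back.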
 *
 * cache_preload_flag - allows suppressing prefetch when set to 0
 * dst_aligned_flag   - selects whether the destination buffer is aligned
 */
.macro process_trailing_pixels cache_preload_flag, \
                               dst_aligned_flag, \
                               process_pixblock_head, \
                               process_pixblock_tail, \
                               process_pixblock_tail_head
    tst         W, #(pixblock_size - 1)
    beq         2f
.irp chunk_size, 16, 8, 4, 2, 1
.if pixblock_size > chunk_size
    tst         W, #chunk_size
    beq         1f
    pixld_src   chunk_size, src_bpp, src_basereg, SRC
    pixld       chunk_size, mask_bpp, mask_basereg, MASK
.if dst_aligned_flag != 0
    pixld_a     chunk_size, dst_r_bpp, dst_r_basereg, DST_R
.else
    pixld       chunk_size, dst_r_bpp, dst_r_basereg, DST_R
.endif
.if cache_preload_flag != 0
    PF add      PF_X, PF_X, #chunk_size
.endif
1:
.endif
.endr
    pixdeinterleave src_bpp, src_basereg
    pixdeinterleave mask_bpp, mask_basereg
    pixdeinterleave dst_r_bpp, dst_r_basereg

    process_pixblock_head
.if cache_preload_flag != 0
    cache_preload 0, pixblock_size
    cache_preload_simple
.endif
    process_pixblock_tail
    pixinterleave dst_w_bpp, dst_w_basereg
.irp chunk_size, 16, 8, 4, 2, 1
.if pixblock_size > chunk_size
    tst         W, #chunk_size
    beq         1f
.if dst_aligned_flag != 0
    pixst_a     chunk_size, dst_w_bpp, dst_w_basereg, DST_W
.else
    pixst       chunk_size, dst_w_bpp, dst_w_basereg, DST_W
.endif
1:
.endif
.endr
2:
.endm

/*
 * Macro which performs all the needed operations to switch to the next
 * scanline and start the next loop iteration unless all the scanlines
 * are already processed.
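 *
 * Note that the pixel loads/stores have already advanced the pointers by
 * one scanline worth of data, so adding the stride and then subtracting
 * the scanline width (in bytes) below moves them to the start of the next
 * line. For 24bpp images the width is instead subtracted from the stride
 * once at function entry, since a x3 byte count cannot be applied with a
 * single shift.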
 */
.macro advance_to_next_scanline start_of_loop_label
.if regs_shortage
    ldrd        W, [sp] /* load W and H (width and height) from stack */
.else
    mov         W, ORIG_W
.endif
    add         DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
.if src_bpp != 0
    add         SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
.endif
.if mask_bpp != 0
    add         MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
.endif
.if (dst_w_bpp != 24)
    sub         DST_W, DST_W, W, lsl #dst_bpp_shift
.endif
.if (src_bpp != 24) && (src_bpp != 0)
    sub         SRC, SRC, W, lsl #src_bpp_shift
.endif
.if (mask_bpp != 24) && (mask_bpp != 0)
    sub         MASK, MASK, W, lsl #mask_bpp_shift
.endif
    subs        H, H, #1
    mov         DST_R, DST_W
.if regs_shortage
    str         H, [sp, #4] /* save updated height to stack */
.endif
    bge         start_of_loop_label
.endm

/*
 * Registers are allocated in the following way by default:
 *  d0, d1, d2, d3     - reserved for loading source pixel data
 *  d4, d5, d6, d7     - reserved for loading destination pixel data
 *  d24, d25, d26, d27 - reserved for loading mask pixel data
 *  d28, d29, d30, d31 - final destination pixel data for writeback to memory
 */
.macro generate_composite_function fname, \
                                   src_bpp_, \
                                   mask_bpp_, \
                                   dst_w_bpp_, \
                                   flags, \
                                   pixblock_size_, \
                                   prefetch_distance, \
                                   init, \
                                   cleanup, \
                                   process_pixblock_head, \
                                   process_pixblock_tail, \
                                   process_pixblock_tail_head, \
                                   dst_w_basereg_ = 28, \
                                   dst_r_basereg_ = 4, \
                                   src_basereg_   = 0, \
                                   mask_basereg_  = 24

    .func fname
    .global fname
    /* For ELF format also set function visibility to hidden */
#ifdef __ELF__
    .hidden fname
    .type fname, %function
#endif
fname:
    .fnstart
    .save       {r4-r12, lr}
    push        {r4-r12, lr}   /* save all registers */

/*
 * Select prefetch type for this function. If prefetch distance is
 * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch
 * has to be used instead of ADVANCED.
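 *
 * (The advanced prefetcher scales pixel counts into byte offsets with a
 * single 'lsl #bpp_shift', which cannot express the x3 factor needed for
 * 24bpp data, hence the fallback to SIMPLE.)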
 */
    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
.if prefetch_distance == 0
    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
        ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
.endif

/*
 * Make some macro arguments globally visible and accessible
 * from other macros
 */
    .set src_bpp, src_bpp_
    .set mask_bpp, mask_bpp_
    .set dst_w_bpp, dst_w_bpp_
    .set pixblock_size, pixblock_size_
    .set dst_w_basereg, dst_w_basereg_
    .set dst_r_basereg, dst_r_basereg_
    .set src_basereg, src_basereg_
    .set mask_basereg, mask_basereg_

    .macro pixld_src x:vararg
        pixld x
    .endm
    .macro fetch_src_pixblock
        pixld_src   pixblock_size, src_bpp, \
                    (src_basereg - pixblock_size * src_bpp / 64), SRC
    .endm
/*
 * Assign symbolic names to registers
 */
    W           .req        r0  /* width (is updated during processing) */
    H           .req        r1  /* height (is updated during processing) */
    DST_W       .req        r2  /* destination buffer pointer for writes */
    DST_STRIDE  .req        r3  /* destination image stride */
    SRC         .req        r4  /* source buffer pointer */
    SRC_STRIDE  .req        r5  /* source image stride */
    DST_R       .req        r6  /* destination buffer pointer for reads */

    MASK        .req        r7  /* mask pointer */
    MASK_STRIDE .req        r8  /* mask stride */

    PF_CTL      .req        r9  /* combined lines counter and prefetch */
                                /* distance increment counter */
    PF_X        .req        r10 /* pixel index in a scanline for current */
                                /* prefetch position */
    PF_SRC      .req        r11 /* pointer to source scanline start */
                                /* for prefetch purposes */
    PF_DST      .req        r12 /* pointer to destination scanline start */
                                /* for prefetch purposes */
    PF_MASK     .req        r14 /* pointer to mask scanline start */
                                /* for prefetch purposes */
/*
 * Check whether we have enough registers for all the local variables.
 * If we don't have enough registers, the original width and height are
 * kept on top of the stack (and the 'regs_shortage' variable is set to
 * indicate this for the rest of the code). Even if there are enough
 * registers, the allocation scheme may be a bit different depending on
 * whether the source or the mask is not used.
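 *
 * Roughly: when the advanced prefetcher is not active, r10/r12 are free
 * and become ORIG_W/DUMMY; with no mask, r7/r8 are reused; with no
 * source, r4/r5 are reused; only when source, mask and advanced prefetch
 * are all needed do the width and height spill to the stack
 * ('regs_shortage' is set to 1).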
 */
.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
    ORIG_W      .req        r10 /* saved original width */
    DUMMY       .req        r12 /* temporary register */
    .set        regs_shortage, 0
.elseif mask_bpp == 0
    ORIG_W      .req        r7  /* saved original width */
    DUMMY       .req        r8  /* temporary register */
    .set        regs_shortage, 0
.elseif src_bpp == 0
    ORIG_W      .req        r4  /* saved original width */
    DUMMY       .req        r5  /* temporary register */
    .set        regs_shortage, 0
.else
    ORIG_W      .req        r1  /* saved original width */
    DUMMY       .req        r1  /* temporary register */
    .set        regs_shortage, 1
.endif

    .set mask_bpp_shift, -1
.if src_bpp == 32
    .set src_bpp_shift, 2
.elseif src_bpp == 24
    .set src_bpp_shift, 0
.elseif src_bpp == 16
    .set src_bpp_shift, 1
.elseif src_bpp == 8
    .set src_bpp_shift, 0
.elseif src_bpp == 0
    .set src_bpp_shift, -1
.else
    .error "requested src bpp (src_bpp) is not supported"
.endif
.if mask_bpp == 32
    .set mask_bpp_shift, 2
.elseif mask_bpp == 24
    .set mask_bpp_shift, 0
.elseif mask_bpp == 8
    .set mask_bpp_shift, 0
.elseif mask_bpp == 0
    .set mask_bpp_shift, -1
.else
    .error "requested mask bpp (mask_bpp) is not supported"
.endif
.if dst_w_bpp == 32
    .set dst_bpp_shift, 2
.elseif dst_w_bpp == 24
    .set dst_bpp_shift, 0
.elseif dst_w_bpp == 16
    .set dst_bpp_shift, 1
.elseif dst_w_bpp == 8
    .set dst_bpp_shift, 0
.else
    .error "requested dst bpp (dst_w_bpp) is not supported"
.endif

.if (((flags) & FLAG_DST_READWRITE) != 0)
    .set dst_r_bpp, dst_w_bpp
.else
    .set dst_r_bpp, 0
.endif
.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
    .set DEINTERLEAVE_32BPP_ENABLED, 1
.else
    .set DEINTERLEAVE_32BPP_ENABLED, 0
.endif

.if prefetch_distance < 0 || prefetch_distance > 15
    .error "invalid prefetch distance (prefetch_distance)"
.endif

.if src_bpp > 0
    ldr         SRC, [sp, #40]
.endif
.if mask_bpp > 0
    ldr         MASK, [sp, #48]
.endif
    PF mov      PF_X, #0
.if src_bpp > 0
    ldr         SRC_STRIDE, [sp, #44]
.endif
.if mask_bpp > 0
    ldr         MASK_STRIDE, [sp, #52]
.endif
    mov         DST_R, DST_W

.if src_bpp == 24
    sub         SRC_STRIDE, SRC_STRIDE, W
    sub         SRC_STRIDE, SRC_STRIDE, W, lsl #1
.endif
.if mask_bpp == 24
    sub         MASK_STRIDE, MASK_STRIDE, W
    sub         MASK_STRIDE, MASK_STRIDE, W, lsl #1
.endif
.if dst_w_bpp == 24
    sub         DST_STRIDE, DST_STRIDE, W
    sub         DST_STRIDE, DST_STRIDE, W, lsl #1
.endif

/*
 * Setup advanced prefetcher initial state
 */
    PF mov      PF_SRC, SRC
    PF mov      PF_DST, DST_R
    PF mov      PF_MASK, MASK
    /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
    PF mov      PF_CTL, H, lsl #4
    PF add      PF_CTL, #(prefetch_distance - 0x10)
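    /* After this, the low 4 bits of PF_CTL hold the prefetch distance
     * ramp-up counter and the upper bits the number of scanlines left to
     * prefetch; cache_preload decrements both as it advances. */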

    init
.if regs_shortage
    .save       {r0, r1}
    push        {r0, r1}
.endif
    subs        H, H, #1
.if regs_shortage
    str         H, [sp, #4] /* save updated height to stack */
.else
    mov         ORIG_W, W
.endif
    blt         9f
    cmp         W, #(pixblock_size * 2)
    blt         8f
/*
 * This is the start of the pipelined loop, which is optimized for
 * long scanlines
 */
0:
    ensure_destination_ptr_alignment process_pixblock_head, \
                                     process_pixblock_tail, \
                                     process_pixblock_tail_head

    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
    pixld_a     pixblock_size, dst_r_bpp, \
                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
    fetch_src_pixblock
    pixld       pixblock_size, mask_bpp, \
                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
    PF add      PF_X, PF_X, #pixblock_size
    process_pixblock_head
    cache_preload 0, pixblock_size
    cache_preload_simple
    subs        W, W, #(pixblock_size * 2)
    blt         2f
1:
    process_pixblock_tail_head
    cache_preload_simple
    subs        W, W, #pixblock_size
    bge         1b
2:
    process_pixblock_tail
    pixst_a     pixblock_size, dst_w_bpp, \
                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W

    /* Process the remaining trailing pixels in the scanline */
    process_trailing_pixels 1, 1, \
                            process_pixblock_head, \
                            process_pixblock_tail, \
                            process_pixblock_tail_head
    advance_to_next_scanline 0b

.if regs_shortage
    pop         {r0, r1}
.endif
    cleanup
    pop         {r4-r12, pc}  /* exit */
/*
 * This is the start of the loop, designed to process images with small width
 * (less than pixblock_size * 2 pixels). In this case neither pipelining
 * nor prefetch are used.
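 *
 * A scanline narrower than pixblock_size * 2 goes through at most one
 * full pixblock (the 'tst W, #pixblock_size' case below) plus the
 * trailing-pixels path, so the software-pipelined loop above would never
 * iterate and is skipped entirely.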
 */
8:
    /* Process exactly pixblock_size pixels if needed */
    tst         W, #pixblock_size
    beq         1f
    pixld       pixblock_size, dst_r_bpp, \
                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
    fetch_src_pixblock
    pixld       pixblock_size, mask_bpp, \
                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
    process_pixblock_head
    process_pixblock_tail
    pixst       pixblock_size, dst_w_bpp, \
                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
1:
    /* Process the remaining trailing pixels in the scanline */
    process_trailing_pixels 0, 0, \
                            process_pixblock_head, \
                            process_pixblock_tail, \
                            process_pixblock_tail_head
    advance_to_next_scanline 8b
9:
.if regs_shortage
    pop         {r0, r1}
.endif
    cleanup
    pop         {r4-r12, pc}  /* exit */
    .fnend

    .purgem     fetch_src_pixblock
    .purgem     pixld_src

    .unreq      SRC
    .unreq      MASK
    .unreq      DST_R
    .unreq      DST_W
    .unreq      ORIG_W
    .unreq      W
    .unreq      H
    .unreq      SRC_STRIDE
    .unreq      DST_STRIDE
    .unreq      MASK_STRIDE
    .unreq      PF_CTL
    .unreq      PF_X
    .unreq      PF_SRC
    .unreq      PF_DST
    .unreq      PF_MASK
    .unreq      DUMMY
    .endfunc
.endm

/*
 * A simplified variant of the function generation template for single
 * scanline processing (for implementing pixman combine functions)
 */
.macro generate_composite_function_scanline use_nearest_scaling, \
                                            fname, \
                                            src_bpp_, \
                                            mask_bpp_, \
                                            dst_w_bpp_, \
                                            flags, \
                                            pixblock_size_, \
                                            init, \
                                            cleanup, \
                                            process_pixblock_head, \
                                            process_pixblock_tail, \
                                            process_pixblock_tail_head, \
                                            dst_w_basereg_ = 28, \
                                            dst_r_basereg_ = 4, \
                                            src_basereg_   = 0, \
                                            mask_basereg_  = 24

    .func fname
    .global fname
    /* For ELF format also set function visibility to hidden */
#ifdef __ELF__
    .hidden fname
    .type fname, %function
#endif
fname:
    .fnstart
    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
/*
 * Make some macro arguments globally visible and accessible
 * from other macros
 */
    .set src_bpp, src_bpp_
    .set mask_bpp, mask_bpp_
    .set dst_w_bpp, dst_w_bpp_
    .set pixblock_size, pixblock_size_
    .set dst_w_basereg, dst_w_basereg_
    .set dst_r_basereg, dst_r_basereg_
    .set src_basereg, src_basereg_
    .set mask_basereg, mask_basereg_

.if use_nearest_scaling != 0
    /*
     * Assign symbolic names to registers for nearest scaling
     */
    W           .req        r0
    DST_W       .req        r1
    SRC         .req        r2
    VX          .req        r3
    UNIT_X      .req        ip
    MASK        .req        lr
    TMP1        .req        r4
    TMP2        .req        r5
    DST_R       .req        r6
    SRC_WIDTH_FIXED .req    r7

    .macro pixld_src x:vararg
        pixld_s x
    .endm

    ldr         UNIT_X, [sp]
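    /* The first stack argument (UNIT_X) is read before the push below;
     * the later stack loads add 24 bytes to skip the six registers saved
     * there. */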
    .save       {r4-r8, lr}
    push        {r4-r8, lr}
    ldr         SRC_WIDTH_FIXED, [sp, #(24 + 4)]
    .if mask_bpp != 0
    ldr         MASK, [sp, #(24 + 8)]
    .endif
.else
    /*
     * Assign symbolic names to registers
     */
    W           .req        r0  /* width (is updated during processing) */
    DST_W       .req        r1  /* destination buffer pointer for writes */
    SRC         .req        r2  /* source buffer pointer */
    DST_R       .req        ip  /* destination buffer pointer for reads */
    MASK        .req        r3  /* mask pointer */

    .macro pixld_src x:vararg
        pixld x
    .endm
.endif

.if (((flags) & FLAG_DST_READWRITE) != 0)
    .set dst_r_bpp, dst_w_bpp
.else
    .set dst_r_bpp, 0
.endif
.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
    .set DEINTERLEAVE_32BPP_ENABLED, 1
.else
    .set DEINTERLEAVE_32BPP_ENABLED, 0
.endif

    .macro fetch_src_pixblock
        pixld_src   pixblock_size, src_bpp, \
                    (src_basereg - pixblock_size * src_bpp / 64), SRC
    .endm

    init
    mov         DST_R, DST_W

    cmp         W, #pixblock_size
    blt         8f

    ensure_destination_ptr_alignment process_pixblock_head, \
                                     process_pixblock_tail, \
                                     process_pixblock_tail_head

    subs        W, W, #pixblock_size
    blt         7f

    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
    pixld_a     pixblock_size, dst_r_bpp, \
                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
    fetch_src_pixblock
    pixld       pixblock_size, mask_bpp, \
                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
    process_pixblock_head
    subs        W, W, #pixblock_size
    blt         2f
1:
    process_pixblock_tail_head
    subs        W, W, #pixblock_size
    bge         1b
2:
    process_pixblock_tail
    pixst_a     pixblock_size, dst_w_bpp, \
                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
7:
    /* Process the remaining trailing pixels in the scanline (dst aligned) */
    process_trailing_pixels 0, 1, \
                            process_pixblock_head, \
                            process_pixblock_tail, \
                            process_pixblock_tail_head

    cleanup
.if use_nearest_scaling != 0
    pop         {r4-r8, pc}  /* exit */
.else
    bx          lr  /* exit */
.endif
8:
    /* Process the remaining trailing pixels in the scanline (dst unaligned) */
    process_trailing_pixels 0, 0, \
                            process_pixblock_head, \
                            process_pixblock_tail, \
                            process_pixblock_tail_head

    cleanup

.if use_nearest_scaling != 0
    pop         {r4-r8, pc}  /* exit */

    .unreq      DST_R
    .unreq      SRC
    .unreq      W
    .unreq      VX
    .unreq      UNIT_X
    .unreq      TMP1
    .unreq      TMP2
    .unreq      DST_W
    .unreq      MASK
    .unreq      SRC_WIDTH_FIXED

.else
    bx          lr  /* exit */

    .unreq      SRC
    .unreq      MASK
    .unreq      DST_R
    .unreq      DST_W
    .unreq      W
.endif

    .purgem     fetch_src_pixblock
    .purgem     pixld_src

    .fnend
    .endfunc
.endm

.macro generate_composite_function_single_scanline x:vararg
    generate_composite_function_scanline 0, x
.endm

.macro generate_composite_function_nearest_scanline x:vararg
    generate_composite_function_scanline 1, x
.endm

/* Default prologue/epilogue, nothing special needs to be done */

.macro default_init
.endm

.macro default_cleanup
.endm

/*
 * Prologue/epilogue variant which additionally saves/restores d8-d15
 * registers (they need to be saved/restored by the callee according to
 * the ABI). This is required if the code needs to use all the NEON
 * registers.
 */

.macro default_init_need_all_regs
    .vsave      {d8-d15}
    vpush       {d8-d15}
.endm

.macro default_cleanup_need_all_regs
    vpop        {d8-d15}
.endm

/******************************************************************************/

/*
 * Conversion of 8 r5g6b5 pixels packed in a 128-bit register (in)
 * into a planar a8r8g8b8 format (with a, r, g, b color components
 * stored into 64-bit registers out_a, out_r, out_g, out_b respectively).
 *
 * Warning: the conversion is destructive and the original
 *          value (in) is lost.
 */
.macro convert_0565_to_8888 in, out_a, out_r, out_g, out_b
    vshrn.u16   out_r, in,    #8
    vshrn.u16   out_g, in,    #3
    vsli.u16    in,    in,    #5
    vmov.u8     out_a, #255
    vsri.u8     out_r, out_r, #5
    vsri.u8     out_g, out_g, #6
    vshrn.u16   out_b, in,    #2
.endm

.macro convert_0565_to_x888 in, out_r, out_g, out_b
    vshrn.u16   out_r, in,    #8
    vshrn.u16   out_g, in,    #3
    vsli.u16    in,    in,    #5
    vsri.u8     out_r, out_r, #5
    vsri.u8     out_g, out_g, #6
    vshrn.u16   out_b, in,    #2
.endm

/*
 * Conversion from planar a8r8g8b8 format (with a, r, g, b color components
 * in 64-bit registers in_a, in_r, in_g, in_b respectively) into 8 r5g6b5
 * pixels packed in a 128-bit register (out). Requires two temporary 128-bit
 * registers (tmp1, tmp2).
 */
.macro convert_8888_to_0565 in_r, in_g, in_b, out, tmp1, tmp2
    vshll.u8    tmp1, in_g, #8
    vshll.u8    out,  in_r, #8
    vshll.u8    tmp2, in_b, #8
    vsri.u16    out,  tmp1, #5
    vsri.u16    out,  tmp2, #11
.endm

/*
 * Conversion of four r5g6b5 pixels (in) to four x8r8g8b8 pixels
 * returned in the (out0, out1) register pair. Requires one temporary
 * 64-bit register (tmp). 'out1' and 'in' may overlap; the original
 * value from 'in' is lost.
 */
.macro convert_four_0565_to_x888_packed in, out0, out1, tmp
    vshl.u16    out0, in,   #5  /* G top 6 bits */
    vshl.u16    tmp,  in,   #11 /* B top 5 bits */
    vsri.u16    in,   in,   #5  /* R is ready in top bits */
    vsri.u16    out0, out0, #6  /* G is ready in top bits */
    vsri.u16    tmp,  tmp,  #5  /* B is ready in top bits */
    vshr.u16    out1, in,   #8  /* R is in place */
    vsri.u16    out0, tmp,  #8  /* G & B is in place */
    vzip.u16    out0, out1      /* everything is in place */
.endm
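
/*
 * Usage sketch (illustrative only): functions are generated from this
 * template by the including .S file, roughly as shown below. The fast path
 * name and the argument values are examples in the style of
 * pixman-arm-neon-asm.S (src/mask/dst bpp, flags, a pixblock size of 8
 * pixels, a prefetch distance of 10), not something defined by this header.
 *
 *     generate_composite_function \
 *         pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
 *         FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
 *         8, \
 *         10, \
 *         default_init, \
 *         default_cleanup, \
 *         pixman_composite_src_8888_8888_process_pixblock_head, \
 *         pixman_composite_src_8888_8888_process_pixblock_tail, \
 *         pixman_composite_src_8888_8888_process_pixblock_tail_head
 */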