gfx/cairo/libpixman/src/pixman-arm-neon-asm.h

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Thu, 22 Jan 2015 13:21:57 +0100
branch:      TOR_BUG_9701
changeset:   15:b8a032363ba2
permissions: -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

michael@0 1 /*
michael@0 2 * Copyright © 2009 Nokia Corporation
michael@0 3 *
michael@0 4 * Permission is hereby granted, free of charge, to any person obtaining a
michael@0 5 * copy of this software and associated documentation files (the "Software"),
michael@0 6 * to deal in the Software without restriction, including without limitation
michael@0 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
michael@0 8 * and/or sell copies of the Software, and to permit persons to whom the
michael@0 9 * Software is furnished to do so, subject to the following conditions:
michael@0 10 *
michael@0 11 * The above copyright notice and this permission notice (including the next
michael@0 12 * paragraph) shall be included in all copies or substantial portions of the
michael@0 13 * Software.
michael@0 14 *
michael@0 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
michael@0 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
michael@0 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
michael@0 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
michael@0 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
michael@0 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
michael@0 21 * DEALINGS IN THE SOFTWARE.
michael@0 22 *
michael@0 23 * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
michael@0 24 */
michael@0 25
michael@0 26 /*
michael@0 27 * This file contains a macro ('generate_composite_function') which can
michael@0 28 * construct 2D image processing functions based on a common template.
michael@0 29 * Any combination of source, destination and mask images with 8bpp,
michael@0 30 * 16bpp, 24bpp or 32bpp color formats is supported.
michael@0 31 *
michael@0 32 * This macro takes care of:
michael@0 33 * - handling of leading and trailing unaligned pixels
michael@0 34 * - doing most of the work related to L2 cache preload
michael@0 35 * - encouraging the use of software pipelining for better instruction
michael@0 36 * scheduling
michael@0 37 *
michael@0 38 * The user of this macro has to provide some configuration parameters
michael@0 39 * (bit depths for the images, prefetch distance, etc.) and a set of
michael@0 40 * macros, which should implement basic code chunks responsible for
michael@0 41 * pixel processing. See the 'pixman-arm-neon-asm.S' file for the usage
michael@0 42 * examples.
michael@0 43 *
michael@0 44 * TODO:
michael@0 45 * - try overlapped pixel method (from Ian Rickards) when processing
michael@0 46 * exactly two blocks of pixels
michael@0 47 * - maybe add an option to do reverse scanline processing
michael@0 48 */
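/*
 * Illustrative usage sketch (added commentary, not part of the original
 * header): a call of this shape generates one composite function. The
 * 'pixman_composite_src_8888_8888_*' names and the exact parameter values
 * are assumptions modelled on the callers in 'pixman-arm-neon-asm.S';
 * the arguments are, in order: function name, src/mask/dst bpp, flags,
 * pixblock_size, prefetch distance, init/cleanup macros and the three
 * pixblock processing macros.
 *
 *   generate_composite_function \
 *       pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
 *       FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
 *       8, \
 *       10, \
 *       default_init, \
 *       default_cleanup, \
 *       pixman_composite_src_8888_8888_process_pixblock_head, \
 *       pixman_composite_src_8888_8888_process_pixblock_tail, \
 *       pixman_composite_src_8888_8888_process_pixblock_tail_head
 */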
michael@0 49
michael@0 50 /*
michael@0 51 * Bit flags for the 'generate_composite_function' macro which are used
michael@0 52 * to tune the behavior of the generated functions.
michael@0 53 */
michael@0 54 .set FLAG_DST_WRITEONLY, 0
michael@0 55 .set FLAG_DST_READWRITE, 1
michael@0 56 .set FLAG_DEINTERLEAVE_32BPP, 2
michael@0 57
michael@0 58 /*
michael@0 59 * Offset in the stack where the mask and source pointer/stride can be accessed
michael@0 60 * from the 'init' macro. This is useful for special handling of a solid mask.
michael@0 61 */
michael@0 62 .set ARGS_STACK_OFFSET, 40
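/* Added note: 40 bytes corresponds to the 'push {r4-r12, lr}' prologue
 * emitted by 'generate_composite_function' below (10 registers * 4 bytes),
 * so the stack-passed arguments start at [sp, #40] when 'init' runs. */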
michael@0 63
michael@0 64 /*
michael@0 65 * Constants for selecting the preferred prefetch type.
michael@0 66 */
michael@0 67 .set PREFETCH_TYPE_NONE, 0 /* No prefetch at all */
michael@0 68 .set PREFETCH_TYPE_SIMPLE, 1 /* A simple, fixed-distance-ahead prefetch */
michael@0 69 .set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */
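/* Added note: PREFETCH_TYPE_DEFAULT (and, for the SIMPLE variant,
 * PREFETCH_DISTANCE_SIMPLE) are not defined in this header; the including
 * file is expected to .set them before using 'generate_composite_function',
 * e.g. something like (values shown only as a hypothetical illustration):
 *     .set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
 *     .set PREFETCH_DISTANCE_SIMPLE, 64
 */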
michael@0 70
michael@0 71 /*
michael@0 72 * Definitions of supplementary pixld/pixst macros (for partial load/store of
michael@0 73 * pixel data).
michael@0 74 */
michael@0 75
michael@0 76 .macro pixldst1 op, elem_size, reg1, mem_operand, abits
michael@0 77 .if abits > 0
michael@0 78 op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
michael@0 79 .else
michael@0 80 op&.&elem_size {d&reg1}, [&mem_operand&]!
michael@0 81 .endif
michael@0 82 .endm
michael@0 83
michael@0 84 .macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
michael@0 85 .if abits > 0
michael@0 86 op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
michael@0 87 .else
michael@0 88 op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
michael@0 89 .endif
michael@0 90 .endm
michael@0 91
michael@0 92 .macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
michael@0 93 .if abits > 0
michael@0 94 op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
michael@0 95 .else
michael@0 96 op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
michael@0 97 .endif
michael@0 98 .endm
michael@0 99
michael@0 100 .macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
michael@0 101 op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
michael@0 102 .endm
michael@0 103
michael@0 104 .macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
michael@0 105 op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
michael@0 106 .endm
michael@0 107
michael@0 108 .macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
michael@0 109 op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
michael@0 110 .endm
michael@0 111
michael@0 112 .macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
michael@0 113 .if numbytes == 32
michael@0 114 pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
michael@0 115 %(basereg+6), %(basereg+7), mem_operand, abits
michael@0 116 .elseif numbytes == 16
michael@0 117 pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
michael@0 118 .elseif numbytes == 8
michael@0 119 pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
michael@0 120 .elseif numbytes == 4
michael@0 121 .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
michael@0 122 pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
michael@0 123 .elseif elem_size == 16
michael@0 124 pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
michael@0 125 pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
michael@0 126 .else
michael@0 127 pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
michael@0 128 pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
michael@0 129 pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
michael@0 130 pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
michael@0 131 .endif
michael@0 132 .elseif numbytes == 2
michael@0 133 .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
michael@0 134 pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
michael@0 135 .else
michael@0 136 pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
michael@0 137 pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
michael@0 138 .endif
michael@0 139 .elseif numbytes == 1
michael@0 140 pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
michael@0 141 .else
michael@0 142 .error "unsupported size: numbytes"
michael@0 143 .endif
michael@0 144 .endm
michael@0 145
michael@0 146 .macro pixld numpix, bpp, basereg, mem_operand, abits=0
michael@0 147 .if bpp > 0
michael@0 148 .if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
michael@0 149 pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
michael@0 150 %(basereg+6), %(basereg+7), mem_operand, abits
michael@0 151 .elseif (bpp == 24) && (numpix == 8)
michael@0 152 pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
michael@0 153 .elseif (bpp == 24) && (numpix == 4)
michael@0 154 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
michael@0 155 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
michael@0 156 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
michael@0 157 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
michael@0 158 .elseif (bpp == 24) && (numpix == 2)
michael@0 159 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
michael@0 160 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
michael@0 161 .elseif (bpp == 24) && (numpix == 1)
michael@0 162 pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
michael@0 163 .else
michael@0 164 pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
michael@0 165 .endif
michael@0 166 .endif
michael@0 167 .endm
michael@0 168
michael@0 169 .macro pixst numpix, bpp, basereg, mem_operand, abits=0
michael@0 170 .if bpp > 0
michael@0 171 .if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
michael@0 172 pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
michael@0 173 %(basereg+6), %(basereg+7), mem_operand, abits
michael@0 174 .elseif (bpp == 24) && (numpix == 8)
michael@0 175 pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
michael@0 176 .elseif (bpp == 24) && (numpix == 4)
michael@0 177 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
michael@0 178 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
michael@0 179 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
michael@0 180 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
michael@0 181 .elseif (bpp == 24) && (numpix == 2)
michael@0 182 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
michael@0 183 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
michael@0 184 .elseif (bpp == 24) && (numpix == 1)
michael@0 185 pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
michael@0 186 .else
michael@0 187 pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
michael@0 188 .endif
michael@0 189 .endif
michael@0 190 .endm
michael@0 191
michael@0 192 .macro pixld_a numpix, bpp, basereg, mem_operand
michael@0 193 .if (bpp * numpix) <= 128
michael@0 194 pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
michael@0 195 .else
michael@0 196 pixld numpix, bpp, basereg, mem_operand, 128
michael@0 197 .endif
michael@0 198 .endm
michael@0 199
michael@0 200 .macro pixst_a numpix, bpp, basereg, mem_operand
michael@0 201 .if (bpp * numpix) <= 128
michael@0 202 pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
michael@0 203 .else
michael@0 204 pixst numpix, bpp, basereg, mem_operand, 128
michael@0 205 .endif
michael@0 206 .endm
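/* Added illustration, derived from the macros above: with basereg = 0,
 * eight 16bpp pixels land in d2/d3, i.e.
 *     pixld   8, 16, 0, SRC   expands to   vld1.16 {d2, d3}, [SRC]!
 *     pixld_a 8, 16, 0, SRC   expands to   vld1.16 {d2, d3}, [SRC, :128]!
 * pixld_a only adds the alignment hint, so it must not be used on buffers
 * that are not suitably aligned. */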
michael@0 207
michael@0 208 /*
michael@0 209 * Pixel fetcher for nearest scaling (needs the TMP1, TMP2, VX, UNIT_X and
michael@0 210 * SRC_WIDTH_FIXED register aliases to be defined)
michael@0 211 */
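/* Rough reading of the fetch code below (added commentary): VX holds the
 * source x coordinate in 16.16 fixed point and UNIT_X the per-pixel step.
 * For every fetched pixel the integer part (VX asr #16) selects the source
 * pixel, VX is advanced by UNIT_X, and the 'subpls ... bpl' loop folds the
 * coordinate by SRC_WIDTH_FIXED to implement repeat across the source
 * width. */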
michael@0 212 .macro pixld1_s elem_size, reg1, mem_operand
michael@0 213 .if elem_size == 16
michael@0 214 mov TMP1, VX, asr #16
michael@0 215 adds VX, VX, UNIT_X
michael@0 216 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 217 bpl 5b
michael@0 218 add TMP1, mem_operand, TMP1, asl #1
michael@0 219 mov TMP2, VX, asr #16
michael@0 220 adds VX, VX, UNIT_X
michael@0 221 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 222 bpl 5b
michael@0 223 add TMP2, mem_operand, TMP2, asl #1
michael@0 224 vld1.16 {d&reg1&[0]}, [TMP1, :16]
michael@0 225 mov TMP1, VX, asr #16
michael@0 226 adds VX, VX, UNIT_X
michael@0 227 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 228 bpl 5b
michael@0 229 add TMP1, mem_operand, TMP1, asl #1
michael@0 230 vld1.16 {d&reg1&[1]}, [TMP2, :16]
michael@0 231 mov TMP2, VX, asr #16
michael@0 232 adds VX, VX, UNIT_X
michael@0 233 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 234 bpl 5b
michael@0 235 add TMP2, mem_operand, TMP2, asl #1
michael@0 236 vld1.16 {d&reg1&[2]}, [TMP1, :16]
michael@0 237 vld1.16 {d&reg1&[3]}, [TMP2, :16]
michael@0 238 .elseif elem_size == 32
michael@0 239 mov TMP1, VX, asr #16
michael@0 240 adds VX, VX, UNIT_X
michael@0 241 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 242 bpl 5b
michael@0 243 add TMP1, mem_operand, TMP1, asl #2
michael@0 244 mov TMP2, VX, asr #16
michael@0 245 adds VX, VX, UNIT_X
michael@0 246 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 247 bpl 5b
michael@0 248 add TMP2, mem_operand, TMP2, asl #2
michael@0 249 vld1.32 {d&reg1&[0]}, [TMP1, :32]
michael@0 250 vld1.32 {d&reg1&[1]}, [TMP2, :32]
michael@0 251 .else
michael@0 252 .error "unsupported"
michael@0 253 .endif
michael@0 254 .endm
michael@0 255
michael@0 256 .macro pixld2_s elem_size, reg1, reg2, mem_operand
michael@0 257 .if 0 /* elem_size == 32 */
michael@0 258 mov TMP1, VX, asr #16
michael@0 259 add VX, VX, UNIT_X, asl #1
michael@0 260 add TMP1, mem_operand, TMP1, asl #2
michael@0 261 mov TMP2, VX, asr #16
michael@0 262 sub VX, VX, UNIT_X
michael@0 263 add TMP2, mem_operand, TMP2, asl #2
michael@0 264 vld1.32 {d&reg1&[0]}, [TMP1, :32]
michael@0 265 mov TMP1, VX, asr #16
michael@0 266 add VX, VX, UNIT_X, asl #1
michael@0 267 add TMP1, mem_operand, TMP1, asl #2
michael@0 268 vld1.32 {d&reg2&[0]}, [TMP2, :32]
michael@0 269 mov TMP2, VX, asr #16
michael@0 270 add VX, VX, UNIT_X
michael@0 271 add TMP2, mem_operand, TMP2, asl #2
michael@0 272 vld1.32 {d&reg1&[1]}, [TMP1, :32]
michael@0 273 vld1.32 {d&reg2&[1]}, [TMP2, :32]
michael@0 274 .else
michael@0 275 pixld1_s elem_size, reg1, mem_operand
michael@0 276 pixld1_s elem_size, reg2, mem_operand
michael@0 277 .endif
michael@0 278 .endm
michael@0 279
michael@0 280 .macro pixld0_s elem_size, reg1, idx, mem_operand
michael@0 281 .if elem_size == 16
michael@0 282 mov TMP1, VX, asr #16
michael@0 283 adds VX, VX, UNIT_X
michael@0 284 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 285 bpl 5b
michael@0 286 add TMP1, mem_operand, TMP1, asl #1
michael@0 287 vld1.16 {d&reg1&[idx]}, [TMP1, :16]
michael@0 288 .elseif elem_size == 32
michael@0 289 mov TMP1, VX, asr #16
michael@0 290 adds VX, VX, UNIT_X
michael@0 291 5: subpls VX, VX, SRC_WIDTH_FIXED
michael@0 292 bpl 5b
michael@0 293 add TMP1, mem_operand, TMP1, asl #2
michael@0 294 vld1.32 {d&reg1&[idx]}, [TMP1, :32]
michael@0 295 .endif
michael@0 296 .endm
michael@0 297
michael@0 298 .macro pixld_s_internal numbytes, elem_size, basereg, mem_operand
michael@0 299 .if numbytes == 32
michael@0 300 pixld2_s elem_size, %(basereg+4), %(basereg+5), mem_operand
michael@0 301 pixld2_s elem_size, %(basereg+6), %(basereg+7), mem_operand
michael@0 302 pixdeinterleave elem_size, %(basereg+4)
michael@0 303 .elseif numbytes == 16
michael@0 304 pixld2_s elem_size, %(basereg+2), %(basereg+3), mem_operand
michael@0 305 .elseif numbytes == 8
michael@0 306 pixld1_s elem_size, %(basereg+1), mem_operand
michael@0 307 .elseif numbytes == 4
michael@0 308 .if elem_size == 32
michael@0 309 pixld0_s elem_size, %(basereg+0), 1, mem_operand
michael@0 310 .elseif elem_size == 16
michael@0 311 pixld0_s elem_size, %(basereg+0), 2, mem_operand
michael@0 312 pixld0_s elem_size, %(basereg+0), 3, mem_operand
michael@0 313 .else
michael@0 314 pixld0_s elem_size, %(basereg+0), 4, mem_operand
michael@0 315 pixld0_s elem_size, %(basereg+0), 5, mem_operand
michael@0 316 pixld0_s elem_size, %(basereg+0), 6, mem_operand
michael@0 317 pixld0_s elem_size, %(basereg+0), 7, mem_operand
michael@0 318 .endif
michael@0 319 .elseif numbytes == 2
michael@0 320 .if elem_size == 16
michael@0 321 pixld0_s elem_size, %(basereg+0), 1, mem_operand
michael@0 322 .else
michael@0 323 pixld0_s elem_size, %(basereg+0), 2, mem_operand
michael@0 324 pixld0_s elem_size, %(basereg+0), 3, mem_operand
michael@0 325 .endif
michael@0 326 .elseif numbytes == 1
michael@0 327 pixld0_s elem_size, %(basereg+0), 1, mem_operand
michael@0 328 .else
michael@0 329 .error "unsupported size: numbytes"
michael@0 330 .endif
michael@0 331 .endm
michael@0 332
michael@0 333 .macro pixld_s numpix, bpp, basereg, mem_operand
michael@0 334 .if bpp > 0
michael@0 335 pixld_s_internal %(numpix * bpp / 8), %(bpp), basereg, mem_operand
michael@0 336 .endif
michael@0 337 .endm
michael@0 338
michael@0 339 .macro vuzp8 reg1, reg2
michael@0 340 vuzp.8 d&reg1, d&reg2
michael@0 341 .endm
michael@0 342
michael@0 343 .macro vzip8 reg1, reg2
michael@0 344 vzip.8 d&reg1, d&reg2
michael@0 345 .endm
michael@0 346
michael@0 347 /* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
michael@0 348 .macro pixdeinterleave bpp, basereg
michael@0 349 .if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
michael@0 350 vuzp8 %(basereg+0), %(basereg+1)
michael@0 351 vuzp8 %(basereg+2), %(basereg+3)
michael@0 352 vuzp8 %(basereg+1), %(basereg+3)
michael@0 353 vuzp8 %(basereg+0), %(basereg+2)
michael@0 354 .endif
michael@0 355 .endm
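/* Added worked example: with eight a8r8g8b8 pixels loaded interleaved into
 * d0-d3 (bytes B0 G0 R0 A0 B1 G1 R1 A1 ...), the four vuzp8 steps above
 * leave one planar channel per register:
 *     d0 = B0..B7, d1 = G0..G7, d2 = R0..R7, d3 = A0..A7   (basereg = 0)
 * 'pixinterleave' below performs the inverse shuffle before writeback. */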
michael@0 356
michael@0 357 /* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
michael@0 358 .macro pixinterleave bpp, basereg
michael@0 359 .if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
michael@0 360 vzip8 %(basereg+0), %(basereg+2)
michael@0 361 vzip8 %(basereg+1), %(basereg+3)
michael@0 362 vzip8 %(basereg+2), %(basereg+3)
michael@0 363 vzip8 %(basereg+0), %(basereg+1)
michael@0 364 .endif
michael@0 365 .endm
michael@0 366
michael@0 367 /*
michael@0 368 * This is a macro for implementing cache preload. The main idea is that
michael@0 369 * the cache preload logic is mostly independent of the rest of the pixel
michael@0 370 * processing code. It starts at the top left pixel and moves forward
michael@0 371 * across pixels and can jump across scanlines. The prefetch distance is
michael@0 372 * handled in an 'incremental' way: it starts from 0 and advances to the
michael@0 373 * optimal distance over time. After reaching the optimal prefetch distance,
michael@0 374 * it is kept constant. There are some checks which prevent prefetching
michael@0 375 * unneeded pixel lines below the image (but it can still prefetch a bit
michael@0 376 * more data on the right side of the image - not a big issue and possibly
michael@0 377 * even helpful when rendering text glyphs). An additional trick is
michael@0 378 * the use of an LDR instruction for prefetch instead of PLD when moving to
michael@0 379 * the next line: the point is that a TLB miss is very likely in this
michael@0 380 * case, and PLD would be useless.
michael@0 381 *
michael@0 382 * This sounds like it may introduce a noticeable overhead (when working with
michael@0 383 * fully cached data). But in reality, thanks to the separate pipeline and
michael@0 384 * instruction queue for the NEON unit in the ARM Cortex-A8, normal ARM code
michael@0 385 * can execute simultaneously with NEON and be completely shadowed by it.
michael@0 386 * Thus we get no performance overhead at all (*). This is a very nice
michael@0 387 * feature of the Cortex-A8, if used wisely. We don't have a hardware
michael@0 388 * prefetcher, but we can still implement some rather advanced prefetch
michael@0 389 * logic in software for almost zero cost!
michael@0 390 *
michael@0 391 * (*) The overhead of the prefetcher is visible when running some trivial
michael@0 392 * pixel processing such as a simple copy. Anyway, having prefetch is a must
michael@0 393 * when working with graphics data.
michael@0 394 */
michael@0 395 .macro PF a, x:vararg
michael@0 396 .if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
michael@0 397 a x
michael@0 398 .endif
michael@0 399 .endm
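/* Added note: 'PF add PF_X, PF_X, #8' emits 'add PF_X, PF_X, #8' only when
 * the ADVANCED prefetcher is selected for the current function; with SIMPLE
 * or NONE prefetch the statement is dropped entirely, so the bookkeeping in
 * 'cache_preload' below costs nothing in those variants. */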
michael@0 400
michael@0 401 .macro cache_preload std_increment, boost_increment
michael@0 402 .if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
michael@0 403 .if regs_shortage
michael@0 404 PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
michael@0 405 .endif
michael@0 406 .if std_increment != 0
michael@0 407 PF add PF_X, PF_X, #std_increment
michael@0 408 .endif
michael@0 409 PF tst PF_CTL, #0xF
michael@0 410 PF addne PF_X, PF_X, #boost_increment
michael@0 411 PF subne PF_CTL, PF_CTL, #1
michael@0 412 PF cmp PF_X, ORIG_W
michael@0 413 .if src_bpp_shift >= 0
michael@0 414 PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
michael@0 415 .endif
michael@0 416 .if dst_r_bpp != 0
michael@0 417 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
michael@0 418 .endif
michael@0 419 .if mask_bpp_shift >= 0
michael@0 420 PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
michael@0 421 .endif
michael@0 422 PF subge PF_X, PF_X, ORIG_W
michael@0 423 PF subges PF_CTL, PF_CTL, #0x10
michael@0 424 .if src_bpp_shift >= 0
michael@0 425 PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
michael@0 426 .endif
michael@0 427 .if dst_r_bpp != 0
michael@0 428 PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
michael@0 429 .endif
michael@0 430 .if mask_bpp_shift >= 0
michael@0 431 PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
michael@0 432 .endif
michael@0 433 .endif
michael@0 434 .endm
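/* Added note on the registers used above: PF_X is the pixel index (within
 * the current scanline) that prefetch has reached so far. PF_CTL packs two
 * counters: bits 4 and up hold the number of scanlines left to prefetch
 * (initialised to (height - 1) << 4 and decremented by 0x10 whenever PF_X
 * wraps past ORIG_W), while the low 4 bits count the remaining 'boost'
 * steps used to ramp the prefetch distance up from zero at the start. */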
michael@0 435
michael@0 436 .macro cache_preload_simple
michael@0 437 .if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
michael@0 438 .if src_bpp > 0
michael@0 439 pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
michael@0 440 .endif
michael@0 441 .if dst_r_bpp > 0
michael@0 442 pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
michael@0 443 .endif
michael@0 444 .if mask_bpp > 0
michael@0 445 pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
michael@0 446 .endif
michael@0 447 .endif
michael@0 448 .endm
michael@0 449
michael@0 450 .macro fetch_mask_pixblock
michael@0 451 pixld pixblock_size, mask_bpp, \
michael@0 452 (mask_basereg - pixblock_size * mask_bpp / 64), MASK
michael@0 453 .endm
michael@0 454
michael@0 455 /*
michael@0 456 * Macro which is used to process leading pixels until the destination
michael@0 457 * pointer is properly aligned (at a 16-byte boundary). When the destination
michael@0 458 * buffer uses 16bpp format, this is unnecessary, or even pointless.
michael@0 459 */
michael@0 460 .macro ensure_destination_ptr_alignment process_pixblock_head, \
michael@0 461 process_pixblock_tail, \
michael@0 462 process_pixblock_tail_head
michael@0 463 .if dst_w_bpp != 24
michael@0 464 tst DST_R, #0xF
michael@0 465 beq 2f
michael@0 466
michael@0 467 .irp lowbit, 1, 2, 4, 8, 16
michael@0 468 local skip1
michael@0 469 .if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
michael@0 470 .if lowbit < 16 /* we don't need more than 16-byte alignment */
michael@0 471 tst DST_R, #lowbit
michael@0 472 beq 1f
michael@0 473 .endif
michael@0 474 pixld_src (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
michael@0 475 pixld (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
michael@0 476 .if dst_r_bpp > 0
michael@0 477 pixld_a (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
michael@0 478 .else
michael@0 479 add DST_R, DST_R, #lowbit
michael@0 480 .endif
michael@0 481 PF add PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
michael@0 482 sub W, W, #(lowbit * 8 / dst_w_bpp)
michael@0 483 1:
michael@0 484 .endif
michael@0 485 .endr
michael@0 486 pixdeinterleave src_bpp, src_basereg
michael@0 487 pixdeinterleave mask_bpp, mask_basereg
michael@0 488 pixdeinterleave dst_r_bpp, dst_r_basereg
michael@0 489
michael@0 490 process_pixblock_head
michael@0 491 cache_preload 0, pixblock_size
michael@0 492 cache_preload_simple
michael@0 493 process_pixblock_tail
michael@0 494
michael@0 495 pixinterleave dst_w_bpp, dst_w_basereg
michael@0 496 .irp lowbit, 1, 2, 4, 8, 16
michael@0 497 .if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
michael@0 498 .if lowbit < 16 /* we don't need more than 16-byte alignment */
michael@0 499 tst DST_W, #lowbit
michael@0 500 beq 1f
michael@0 501 .endif
michael@0 502 pixst_a (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
michael@0 503 1:
michael@0 504 .endif
michael@0 505 .endr
michael@0 506 .endif
michael@0 507 2:
michael@0 508 .endm
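/* Added illustration: for a 16bpp destination with pixblock_size = 8, the
 * .irp block above peels at most 1 + 2 + 4 leading pixels (2-, 4- and
 * 8-byte chunks), which is exactly enough to bring any 2-byte aligned
 * destination pointer up to a 16-byte boundary before the main loop. */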
michael@0 509
michael@0 510 /*
michael@0 511 * Special code for processing up to (pixblock_size - 1) remaining
michael@0 512 * trailing pixels. As SIMD processing operates on
michael@0 513 * pixblock_size pixels, anything smaller than this has to be loaded
michael@0 514 * and stored in a special way. Loading and storing of pixel data is
michael@0 515 * performed in such a way that we fill some 'slots' in the NEON
michael@0 516 * registers (some slots are naturally unused), then perform the
michael@0 517 * compositing operation as usual. In the end, the data is taken from
michael@0 518 * these 'slots' and saved to memory.
michael@0 519 *
michael@0 520 * cache_preload_flag - prefetch is suppressed when
michael@0 521 * set to 0
michael@0 522 * dst_aligned_flag - selects whether the destination
michael@0 523 * buffer is aligned
michael@0 524 */
michael@0 525 .macro process_trailing_pixels cache_preload_flag, \
michael@0 526 dst_aligned_flag, \
michael@0 527 process_pixblock_head, \
michael@0 528 process_pixblock_tail, \
michael@0 529 process_pixblock_tail_head
michael@0 530 tst W, #(pixblock_size - 1)
michael@0 531 beq 2f
michael@0 532 .irp chunk_size, 16, 8, 4, 2, 1
michael@0 533 .if pixblock_size > chunk_size
michael@0 534 tst W, #chunk_size
michael@0 535 beq 1f
michael@0 536 pixld_src chunk_size, src_bpp, src_basereg, SRC
michael@0 537 pixld chunk_size, mask_bpp, mask_basereg, MASK
michael@0 538 .if dst_aligned_flag != 0
michael@0 539 pixld_a chunk_size, dst_r_bpp, dst_r_basereg, DST_R
michael@0 540 .else
michael@0 541 pixld chunk_size, dst_r_bpp, dst_r_basereg, DST_R
michael@0 542 .endif
michael@0 543 .if cache_preload_flag != 0
michael@0 544 PF add PF_X, PF_X, #chunk_size
michael@0 545 .endif
michael@0 546 1:
michael@0 547 .endif
michael@0 548 .endr
michael@0 549 pixdeinterleave src_bpp, src_basereg
michael@0 550 pixdeinterleave mask_bpp, mask_basereg
michael@0 551 pixdeinterleave dst_r_bpp, dst_r_basereg
michael@0 552
michael@0 553 process_pixblock_head
michael@0 554 .if cache_preload_flag != 0
michael@0 555 cache_preload 0, pixblock_size
michael@0 556 cache_preload_simple
michael@0 557 .endif
michael@0 558 process_pixblock_tail
michael@0 559 pixinterleave dst_w_bpp, dst_w_basereg
michael@0 560 .irp chunk_size, 16, 8, 4, 2, 1
michael@0 561 .if pixblock_size > chunk_size
michael@0 562 tst W, #chunk_size
michael@0 563 beq 1f
michael@0 564 .if dst_aligned_flag != 0
michael@0 565 pixst_a chunk_size, dst_w_bpp, dst_w_basereg, DST_W
michael@0 566 .else
michael@0 567 pixst chunk_size, dst_w_bpp, dst_w_basereg, DST_W
michael@0 568 .endif
michael@0 569 1:
michael@0 570 .endif
michael@0 571 .endr
michael@0 572 2:
michael@0 573 .endm
michael@0 574
michael@0 575 /*
michael@0 576 * Macro which performs all the operations needed to switch to the next
michael@0 577 * scanline and start the next loop iteration unless all the scanlines
michael@0 578 * are already processed.
michael@0 579 */
michael@0 580 .macro advance_to_next_scanline start_of_loop_label
michael@0 581 .if regs_shortage
michael@0 582 ldrd W, [sp] /* load W and H (width and height) from stack */
michael@0 583 .else
michael@0 584 mov W, ORIG_W
michael@0 585 .endif
michael@0 586 add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
michael@0 587 .if src_bpp != 0
michael@0 588 add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
michael@0 589 .endif
michael@0 590 .if mask_bpp != 0
michael@0 591 add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
michael@0 592 .endif
michael@0 593 .if (dst_w_bpp != 24)
michael@0 594 sub DST_W, DST_W, W, lsl #dst_bpp_shift
michael@0 595 .endif
michael@0 596 .if (src_bpp != 24) && (src_bpp != 0)
michael@0 597 sub SRC, SRC, W, lsl #src_bpp_shift
michael@0 598 .endif
michael@0 599 .if (mask_bpp != 24) && (mask_bpp != 0)
michael@0 600 sub MASK, MASK, W, lsl #mask_bpp_shift
michael@0 601 .endif
michael@0 602 subs H, H, #1
michael@0 603 mov DST_R, DST_W
michael@0 604 .if regs_shortage
michael@0 605 str H, [sp, #4] /* save updated height to stack */
michael@0 606 .endif
michael@0 607 bge start_of_loop_label
michael@0 608 .endm
michael@0 609
michael@0 610 /*
michael@0 611 * Registers are allocated in the following way by default:
michael@0 612 * d0, d1, d2, d3 - reserved for loading source pixel data
michael@0 613 * d4, d5, d6, d7 - reserved for loading destination pixel data
michael@0 614 * d24, d25, d26, d27 - reserved for loading mask pixel data
michael@0 615 * d28, d29, d30, d31 - final destination pixel data for writeback to memory
michael@0 616 */
michael@0 617 .macro generate_composite_function fname, \
michael@0 618 src_bpp_, \
michael@0 619 mask_bpp_, \
michael@0 620 dst_w_bpp_, \
michael@0 621 flags, \
michael@0 622 pixblock_size_, \
michael@0 623 prefetch_distance, \
michael@0 624 init, \
michael@0 625 cleanup, \
michael@0 626 process_pixblock_head, \
michael@0 627 process_pixblock_tail, \
michael@0 628 process_pixblock_tail_head, \
michael@0 629 dst_w_basereg_ = 28, \
michael@0 630 dst_r_basereg_ = 4, \
michael@0 631 src_basereg_ = 0, \
michael@0 632 mask_basereg_ = 24
michael@0 633
michael@0 634 .func fname
michael@0 635 .global fname
michael@0 636 /* For ELF format also set function visibility to hidden */
michael@0 637 #ifdef __ELF__
michael@0 638 .hidden fname
michael@0 639 .type fname, %function
michael@0 640 #endif
michael@0 641 fname:
michael@0 642 .fnstart
michael@0 643 .save {r4-r12, lr}
michael@0 644 push {r4-r12, lr} /* save all registers */
michael@0 645
michael@0 646 /*
michael@0 647 * Select the prefetch type for this function. If the prefetch distance is
michael@0 648 * set to 0, prefetch is disabled; if one of the color formats is 24bpp,
michael@0 649 * SIMPLE prefetch has to be used instead of ADVANCED.
michael@0 650 */
michael@0 651 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
michael@0 652 .if prefetch_distance == 0
michael@0 653 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
michael@0 654 .elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
michael@0 655 ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
michael@0 656 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
michael@0 657 .endif
michael@0 658
michael@0 659 /*
michael@0 660 * Make some macro arguments globally visible and accessible
michael@0 661 * from other macros
michael@0 662 */
michael@0 663 .set src_bpp, src_bpp_
michael@0 664 .set mask_bpp, mask_bpp_
michael@0 665 .set dst_w_bpp, dst_w_bpp_
michael@0 666 .set pixblock_size, pixblock_size_
michael@0 667 .set dst_w_basereg, dst_w_basereg_
michael@0 668 .set dst_r_basereg, dst_r_basereg_
michael@0 669 .set src_basereg, src_basereg_
michael@0 670 .set mask_basereg, mask_basereg_
michael@0 671
michael@0 672 .macro pixld_src x:vararg
michael@0 673 pixld x
michael@0 674 .endm
michael@0 675 .macro fetch_src_pixblock
michael@0 676 pixld_src pixblock_size, src_bpp, \
michael@0 677 (src_basereg - pixblock_size * src_bpp / 64), SRC
michael@0 678 .endm
michael@0 679 /*
michael@0 680 * Assign symbolic names to registers
michael@0 681 */
michael@0 682 W .req r0 /* width (is updated during processing) */
michael@0 683 H .req r1 /* height (is updated during processing) */
michael@0 684 DST_W .req r2 /* destination buffer pointer for writes */
michael@0 685 DST_STRIDE .req r3 /* destination image stride */
michael@0 686 SRC .req r4 /* source buffer pointer */
michael@0 687 SRC_STRIDE .req r5 /* source image stride */
michael@0 688 DST_R .req r6 /* destination buffer pointer for reads */
michael@0 689
michael@0 690 MASK .req r7 /* mask pointer */
michael@0 691 MASK_STRIDE .req r8 /* mask stride */
michael@0 692
michael@0 693 PF_CTL .req r9 /* combined lines counter and prefetch */
michael@0 694 /* distance increment counter */
michael@0 695 PF_X .req r10 /* pixel index in a scanline for current */
michael@0 696 /* prefetch position */
michael@0 697 PF_SRC .req r11 /* pointer to source scanline start */
michael@0 698 /* for prefetch purposes */
michael@0 699 PF_DST .req r12 /* pointer to destination scanline start */
michael@0 700 /* for prefetch purposes */
michael@0 701 PF_MASK .req r14 /* pointer to mask scanline start */
michael@0 702 /* for prefetch purposes */
michael@0 703 /*
michael@0 704 * Check whether we have enough registers for all the local variables.
michael@0 705 * If we don't have enough registers, the original width and height are
michael@0 706 * kept on top of the stack (and the 'regs_shortage' variable is set to
michael@0 707 * indicate this for the rest of the code). Even if there are enough
michael@0 708 * registers, the allocation scheme may differ slightly depending on
michael@0 709 * whether the source or mask is unused.
michael@0 710 */
michael@0 711 .if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
michael@0 712 ORIG_W .req r10 /* saved original width */
michael@0 713 DUMMY .req r12 /* temporary register */
michael@0 714 .set regs_shortage, 0
michael@0 715 .elseif mask_bpp == 0
michael@0 716 ORIG_W .req r7 /* saved original width */
michael@0 717 DUMMY .req r8 /* temporary register */
michael@0 718 .set regs_shortage, 0
michael@0 719 .elseif src_bpp == 0
michael@0 720 ORIG_W .req r4 /* saved original width */
michael@0 721 DUMMY .req r5 /* temporary register */
michael@0 722 .set regs_shortage, 0
michael@0 723 .else
michael@0 724 ORIG_W .req r1 /* saved original width */
michael@0 725 DUMMY .req r1 /* temporary register */
michael@0 726 .set regs_shortage, 1
michael@0 727 .endif
michael@0 728
michael@0 729 .set mask_bpp_shift, -1
michael@0 730 .if src_bpp == 32
michael@0 731 .set src_bpp_shift, 2
michael@0 732 .elseif src_bpp == 24
michael@0 733 .set src_bpp_shift, 0
michael@0 734 .elseif src_bpp == 16
michael@0 735 .set src_bpp_shift, 1
michael@0 736 .elseif src_bpp == 8
michael@0 737 .set src_bpp_shift, 0
michael@0 738 .elseif src_bpp == 0
michael@0 739 .set src_bpp_shift, -1
michael@0 740 .else
michael@0 741 .error "requested src bpp (src_bpp) is not supported"
michael@0 742 .endif
michael@0 743 .if mask_bpp == 32
michael@0 744 .set mask_bpp_shift, 2
michael@0 745 .elseif mask_bpp == 24
michael@0 746 .set mask_bpp_shift, 0
michael@0 747 .elseif mask_bpp == 8
michael@0 748 .set mask_bpp_shift, 0
michael@0 749 .elseif mask_bpp == 0
michael@0 750 .set mask_bpp_shift, -1
michael@0 751 .else
michael@0 752 .error "requested mask bpp (mask_bpp) is not supported"
michael@0 753 .endif
michael@0 754 .if dst_w_bpp == 32
michael@0 755 .set dst_bpp_shift, 2
michael@0 756 .elseif dst_w_bpp == 24
michael@0 757 .set dst_bpp_shift, 0
michael@0 758 .elseif dst_w_bpp == 16
michael@0 759 .set dst_bpp_shift, 1
michael@0 760 .elseif dst_w_bpp == 8
michael@0 761 .set dst_bpp_shift, 0
michael@0 762 .else
michael@0 763 .error "requested dst bpp (dst_w_bpp) is not supported"
michael@0 764 .endif
michael@0 765
michael@0 766 .if (((flags) & FLAG_DST_READWRITE) != 0)
michael@0 767 .set dst_r_bpp, dst_w_bpp
michael@0 768 .else
michael@0 769 .set dst_r_bpp, 0
michael@0 770 .endif
michael@0 771 .if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
michael@0 772 .set DEINTERLEAVE_32BPP_ENABLED, 1
michael@0 773 .else
michael@0 774 .set DEINTERLEAVE_32BPP_ENABLED, 0
michael@0 775 .endif
michael@0 776
michael@0 777 .if prefetch_distance < 0 || prefetch_distance > 15
michael@0 778 .error "invalid prefetch distance (prefetch_distance)"
michael@0 779 .endif
michael@0 780
michael@0 781 .if src_bpp > 0
michael@0 782 ldr SRC, [sp, #40]
michael@0 783 .endif
michael@0 784 .if mask_bpp > 0
michael@0 785 ldr MASK, [sp, #48]
michael@0 786 .endif
michael@0 787 PF mov PF_X, #0
michael@0 788 .if src_bpp > 0
michael@0 789 ldr SRC_STRIDE, [sp, #44]
michael@0 790 .endif
michael@0 791 .if mask_bpp > 0
michael@0 792 ldr MASK_STRIDE, [sp, #52]
michael@0 793 .endif
michael@0 794 mov DST_R, DST_W
michael@0 795
michael@0 796 .if src_bpp == 24
michael@0 797 sub SRC_STRIDE, SRC_STRIDE, W
michael@0 798 sub SRC_STRIDE, SRC_STRIDE, W, lsl #1
michael@0 799 .endif
michael@0 800 .if mask_bpp == 24
michael@0 801 sub MASK_STRIDE, MASK_STRIDE, W
michael@0 802 sub MASK_STRIDE, MASK_STRIDE, W, lsl #1
michael@0 803 .endif
michael@0 804 .if dst_w_bpp == 24
michael@0 805 sub DST_STRIDE, DST_STRIDE, W
michael@0 806 sub DST_STRIDE, DST_STRIDE, W, lsl #1
michael@0 807 .endif
michael@0 808
michael@0 809 /*
michael@0 810 * Setup advanced prefetcher initial state
michael@0 811 */
michael@0 812 PF mov PF_SRC, SRC
michael@0 813 PF mov PF_DST, DST_R
michael@0 814 PF mov PF_MASK, MASK
michael@0 815 /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
michael@0 816 PF mov PF_CTL, H, lsl #4
michael@0 817 PF add PF_CTL, #(prefetch_distance - 0x10)
michael@0 818
michael@0 819 init
michael@0 820 .if regs_shortage
michael@0 821 .save {r0, r1}
michael@0 822 push {r0, r1}
michael@0 823 .endif
michael@0 824 subs H, H, #1
michael@0 825 .if regs_shortage
michael@0 826 str H, [sp, #4] /* save updated height to stack */
michael@0 827 .else
michael@0 828 mov ORIG_W, W
michael@0 829 .endif
michael@0 830 blt 9f
michael@0 831 cmp W, #(pixblock_size * 2)
michael@0 832 blt 8f
michael@0 833 /*
michael@0 834 * This is the start of the pipelined loop, which is optimized for
michael@0 835 * long scanlines.
michael@0 836 */
michael@0 837 0:
michael@0 838 ensure_destination_ptr_alignment process_pixblock_head, \
michael@0 839 process_pixblock_tail, \
michael@0 840 process_pixblock_tail_head
michael@0 841
michael@0 842 /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
michael@0 843 pixld_a pixblock_size, dst_r_bpp, \
michael@0 844 (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
michael@0 845 fetch_src_pixblock
michael@0 846 pixld pixblock_size, mask_bpp, \
michael@0 847 (mask_basereg - pixblock_size * mask_bpp / 64), MASK
michael@0 848 PF add PF_X, PF_X, #pixblock_size
michael@0 849 process_pixblock_head
michael@0 850 cache_preload 0, pixblock_size
michael@0 851 cache_preload_simple
michael@0 852 subs W, W, #(pixblock_size * 2)
michael@0 853 blt 2f
michael@0 854 1:
michael@0 855 process_pixblock_tail_head
michael@0 856 cache_preload_simple
michael@0 857 subs W, W, #pixblock_size
michael@0 858 bge 1b
michael@0 859 2:
michael@0 860 process_pixblock_tail
michael@0 861 pixst_a pixblock_size, dst_w_bpp, \
michael@0 862 (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
michael@0 863
michael@0 864 /* Process the remaining trailing pixels in the scanline */
michael@0 865 process_trailing_pixels 1, 1, \
michael@0 866 process_pixblock_head, \
michael@0 867 process_pixblock_tail, \
michael@0 868 process_pixblock_tail_head
michael@0 869 advance_to_next_scanline 0b
michael@0 870
michael@0 871 .if regs_shortage
michael@0 872 pop {r0, r1}
michael@0 873 .endif
michael@0 874 cleanup
michael@0 875 pop {r4-r12, pc} /* exit */
michael@0 876 /*
michael@0 877 * This is the start of the loop designed to process images with a small width
michael@0 878 * (less than pixblock_size * 2 pixels). In this case neither pipelining
michael@0 879 * nor prefetch is used.
michael@0 880 */
michael@0 881 8:
michael@0 882 /* Process exactly pixblock_size pixels if needed */
michael@0 883 tst W, #pixblock_size
michael@0 884 beq 1f
michael@0 885 pixld pixblock_size, dst_r_bpp, \
michael@0 886 (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
michael@0 887 fetch_src_pixblock
michael@0 888 pixld pixblock_size, mask_bpp, \
michael@0 889 (mask_basereg - pixblock_size * mask_bpp / 64), MASK
michael@0 890 process_pixblock_head
michael@0 891 process_pixblock_tail
michael@0 892 pixst pixblock_size, dst_w_bpp, \
michael@0 893 (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
michael@0 894 1:
michael@0 895 /* Process the remaining trailing pixels in the scanline */
michael@0 896 process_trailing_pixels 0, 0, \
michael@0 897 process_pixblock_head, \
michael@0 898 process_pixblock_tail, \
michael@0 899 process_pixblock_tail_head
michael@0 900 advance_to_next_scanline 8b
michael@0 901 9:
michael@0 902 .if regs_shortage
michael@0 903 pop {r0, r1}
michael@0 904 .endif
michael@0 905 cleanup
michael@0 906 pop {r4-r12, pc} /* exit */
michael@0 907 .fnend
michael@0 908
michael@0 909 .purgem fetch_src_pixblock
michael@0 910 .purgem pixld_src
michael@0 911
michael@0 912 .unreq SRC
michael@0 913 .unreq MASK
michael@0 914 .unreq DST_R
michael@0 915 .unreq DST_W
michael@0 916 .unreq ORIG_W
michael@0 917 .unreq W
michael@0 918 .unreq H
michael@0 919 .unreq SRC_STRIDE
michael@0 920 .unreq DST_STRIDE
michael@0 921 .unreq MASK_STRIDE
michael@0 922 .unreq PF_CTL
michael@0 923 .unreq PF_X
michael@0 924 .unreq PF_SRC
michael@0 925 .unreq PF_DST
michael@0 926 .unreq PF_MASK
michael@0 927 .unreq DUMMY
michael@0 928 .endfunc
michael@0 929 .endm
michael@0 930
michael@0 931 /*
michael@0 932 * A simplified variant of the function generation template for single
michael@0 933 * scanline processing (used to implement the pixman combine functions).
michael@0 934 */
michael@0 935 .macro generate_composite_function_scanline use_nearest_scaling, \
michael@0 936 fname, \
michael@0 937 src_bpp_, \
michael@0 938 mask_bpp_, \
michael@0 939 dst_w_bpp_, \
michael@0 940 flags, \
michael@0 941 pixblock_size_, \
michael@0 942 init, \
michael@0 943 cleanup, \
michael@0 944 process_pixblock_head, \
michael@0 945 process_pixblock_tail, \
michael@0 946 process_pixblock_tail_head, \
michael@0 947 dst_w_basereg_ = 28, \
michael@0 948 dst_r_basereg_ = 4, \
michael@0 949 src_basereg_ = 0, \
michael@0 950 mask_basereg_ = 24
michael@0 951
michael@0 952 .func fname
michael@0 953 .global fname
michael@0 954 /* For ELF format also set function visibility to hidden */
michael@0 955 #ifdef __ELF__
michael@0 956 .hidden fname
michael@0 957 .type fname, %function
michael@0 958 #endif
michael@0 959 fname:
michael@0 960 .fnstart
michael@0 961 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
michael@0 962 /*
michael@0 963 * Make some macro arguments globally visible and accessible
michael@0 964 * from other macros
michael@0 965 */
michael@0 966 .set src_bpp, src_bpp_
michael@0 967 .set mask_bpp, mask_bpp_
michael@0 968 .set dst_w_bpp, dst_w_bpp_
michael@0 969 .set pixblock_size, pixblock_size_
michael@0 970 .set dst_w_basereg, dst_w_basereg_
michael@0 971 .set dst_r_basereg, dst_r_basereg_
michael@0 972 .set src_basereg, src_basereg_
michael@0 973 .set mask_basereg, mask_basereg_
michael@0 974
michael@0 975 .if use_nearest_scaling != 0
michael@0 976 /*
michael@0 977 * Assign symbolic names to registers for nearest scaling
michael@0 978 */
michael@0 979 W .req r0
michael@0 980 DST_W .req r1
michael@0 981 SRC .req r2
michael@0 982 VX .req r3
michael@0 983 UNIT_X .req ip
michael@0 984 MASK .req lr
michael@0 985 TMP1 .req r4
michael@0 986 TMP2 .req r5
michael@0 987 DST_R .req r6
michael@0 988 SRC_WIDTH_FIXED .req r7
michael@0 989
michael@0 990 .macro pixld_src x:vararg
michael@0 991 pixld_s x
michael@0 992 .endm
michael@0 993
michael@0 994 ldr UNIT_X, [sp]
michael@0 995 .save {r4-r8, lr}
michael@0 996 push {r4-r8, lr}
michael@0 997 ldr SRC_WIDTH_FIXED, [sp, #(24 + 4)]
michael@0 998 .if mask_bpp != 0
michael@0 999 ldr MASK, [sp, #(24 + 8)]
michael@0 1000 .endif
michael@0 1001 .else
michael@0 1002 /*
michael@0 1003 * Assign symbolic names to registers
michael@0 1004 */
michael@0 1005 W .req r0 /* width (is updated during processing) */
michael@0 1006 DST_W .req r1 /* destination buffer pointer for writes */
michael@0 1007 SRC .req r2 /* source buffer pointer */
michael@0 1008 DST_R .req ip /* destination buffer pointer for reads */
michael@0 1009 MASK .req r3 /* mask pointer */
michael@0 1010
michael@0 1011 .macro pixld_src x:vararg
michael@0 1012 pixld x
michael@0 1013 .endm
michael@0 1014 .endif
michael@0 1015
michael@0 1016 .if (((flags) & FLAG_DST_READWRITE) != 0)
michael@0 1017 .set dst_r_bpp, dst_w_bpp
michael@0 1018 .else
michael@0 1019 .set dst_r_bpp, 0
michael@0 1020 .endif
michael@0 1021 .if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
michael@0 1022 .set DEINTERLEAVE_32BPP_ENABLED, 1
michael@0 1023 .else
michael@0 1024 .set DEINTERLEAVE_32BPP_ENABLED, 0
michael@0 1025 .endif
michael@0 1026
michael@0 1027 .macro fetch_src_pixblock
michael@0 1028 pixld_src pixblock_size, src_bpp, \
michael@0 1029 (src_basereg - pixblock_size * src_bpp / 64), SRC
michael@0 1030 .endm
michael@0 1031
michael@0 1032 init
michael@0 1033 mov DST_R, DST_W
michael@0 1034
michael@0 1035 cmp W, #pixblock_size
michael@0 1036 blt 8f
michael@0 1037
michael@0 1038 ensure_destination_ptr_alignment process_pixblock_head, \
michael@0 1039 process_pixblock_tail, \
michael@0 1040 process_pixblock_tail_head
michael@0 1041
michael@0 1042 subs W, W, #pixblock_size
michael@0 1043 blt 7f
michael@0 1044
michael@0 1045 /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
michael@0 1046 pixld_a pixblock_size, dst_r_bpp, \
michael@0 1047 (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
michael@0 1048 fetch_src_pixblock
michael@0 1049 pixld pixblock_size, mask_bpp, \
michael@0 1050 (mask_basereg - pixblock_size * mask_bpp / 64), MASK
michael@0 1051 process_pixblock_head
michael@0 1052 subs W, W, #pixblock_size
michael@0 1053 blt 2f
michael@0 1054 1:
michael@0 1055 process_pixblock_tail_head
michael@0 1056 subs W, W, #pixblock_size
michael@0 1057 bge 1b
michael@0 1058 2:
michael@0 1059 process_pixblock_tail
michael@0 1060 pixst_a pixblock_size, dst_w_bpp, \
michael@0 1061 (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
michael@0 1062 7:
michael@0 1063 /* Process the remaining trailing pixels in the scanline (dst aligned) */
michael@0 1064 process_trailing_pixels 0, 1, \
michael@0 1065 process_pixblock_head, \
michael@0 1066 process_pixblock_tail, \
michael@0 1067 process_pixblock_tail_head
michael@0 1068
michael@0 1069 cleanup
michael@0 1070 .if use_nearest_scaling != 0
michael@0 1071 pop {r4-r8, pc} /* exit */
michael@0 1072 .else
michael@0 1073 bx lr /* exit */
michael@0 1074 .endif
michael@0 1075 8:
michael@0 1076 /* Process the remaining trailing pixels in the scanline (dst unaligned) */
michael@0 1077 process_trailing_pixels 0, 0, \
michael@0 1078 process_pixblock_head, \
michael@0 1079 process_pixblock_tail, \
michael@0 1080 process_pixblock_tail_head
michael@0 1081
michael@0 1082 cleanup
michael@0 1083
michael@0 1084 .if use_nearest_scaling != 0
michael@0 1085 pop {r4-r8, pc} /* exit */
michael@0 1086
michael@0 1087 .unreq DST_R
michael@0 1088 .unreq SRC
michael@0 1089 .unreq W
michael@0 1090 .unreq VX
michael@0 1091 .unreq UNIT_X
michael@0 1092 .unreq TMP1
michael@0 1093 .unreq TMP2
michael@0 1094 .unreq DST_W
michael@0 1095 .unreq MASK
michael@0 1096 .unreq SRC_WIDTH_FIXED
michael@0 1097
michael@0 1098 .else
michael@0 1099 bx lr /* exit */
michael@0 1100
michael@0 1101 .unreq SRC
michael@0 1102 .unreq MASK
michael@0 1103 .unreq DST_R
michael@0 1104 .unreq DST_W
michael@0 1105 .unreq W
michael@0 1106 .endif
michael@0 1107
michael@0 1108 .purgem fetch_src_pixblock
michael@0 1109 .purgem pixld_src
michael@0 1110
michael@0 1111 .fnend
michael@0 1112 .endfunc
michael@0 1113 .endm
michael@0 1114
michael@0 1115 .macro generate_composite_function_single_scanline x:vararg
michael@0 1116 generate_composite_function_scanline 0, x
michael@0 1117 .endm
michael@0 1118
michael@0 1119 .macro generate_composite_function_nearest_scanline x:vararg
michael@0 1120 generate_composite_function_scanline 1, x
michael@0 1121 .endm
michael@0 1122
michael@0 1123 /* Default prologue/epilogue, nothing special needs to be done */
michael@0 1124
michael@0 1125 .macro default_init
michael@0 1126 .endm
michael@0 1127
michael@0 1128 .macro default_cleanup
michael@0 1129 .endm
michael@0 1130
michael@0 1131 /*
michael@0 1132 * Prologue/epilogue variant which additionally saves/restores d8-d15
michael@0 1133 * registers (they need to be saved/restored by the callee according to the ABI).
michael@0 1134 * This is required if the code needs to use all the NEON registers.
michael@0 1135 */
michael@0 1136
michael@0 1137 .macro default_init_need_all_regs
michael@0 1138 .vsave {d8-d15}
michael@0 1139 vpush {d8-d15}
michael@0 1140 .endm
michael@0 1141
michael@0 1142 .macro default_cleanup_need_all_regs
michael@0 1143 vpop {d8-d15}
michael@0 1144 .endm
michael@0 1145
michael@0 1146 /******************************************************************************/
michael@0 1147
michael@0 1148 /*
michael@0 1149 * Conversion of 8 r5g6b5 pixels packed in a 128-bit register (in)
michael@0 1150 * into a planar a8r8g8b8 format (with a, r, g, b color components
michael@0 1151 * stored into 64-bit registers out_a, out_r, out_g, out_b respectively).
michael@0 1152 *
michael@0 1153 * Warning: the conversion is destructive and the original
michael@0 1154 * value (in) is lost.
michael@0 1155 */
michael@0 1156 .macro convert_0565_to_8888 in, out_a, out_r, out_g, out_b
michael@0 1157 vshrn.u16 out_r, in, #8
michael@0 1158 vshrn.u16 out_g, in, #3
michael@0 1159 vsli.u16 in, in, #5
michael@0 1160 vmov.u8 out_a, #255
michael@0 1161 vsri.u8 out_r, out_r, #5
michael@0 1162 vsri.u8 out_g, out_g, #6
michael@0 1163 vshrn.u16 out_b, in, #2
michael@0 1164 .endm
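/* Added worked example of the widening trick above, for one source pixel
 * rrrrrggggggbbbbb:
 *     vshrn #8 -> rrrrrggg, then vsri #5 -> rrrrrrrr (top R bits replicated)
 *     vshrn #3 -> ggggggbb, then vsri #6 -> gggggggg
 *     vsli #5 moves bbbbb up to bits 9:5, so vshrn #2 -> bbbbbbbb
 * i.e. each channel is expanded to 8 bits by replicating its own top bits
 * into the newly opened low bits. */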
michael@0 1165
michael@0 1166 .macro convert_0565_to_x888 in, out_r, out_g, out_b
michael@0 1167 vshrn.u16 out_r, in, #8
michael@0 1168 vshrn.u16 out_g, in, #3
michael@0 1169 vsli.u16 in, in, #5
michael@0 1170 vsri.u8 out_r, out_r, #5
michael@0 1171 vsri.u8 out_g, out_g, #6
michael@0 1172 vshrn.u16 out_b, in, #2
michael@0 1173 .endm
michael@0 1174
michael@0 1175 /*
michael@0 1176 * Conversion from planar a8r8g8b8 format (with the r, g, b color components
michael@0 1177 * in 64-bit registers in_r, in_g, in_b respectively) into 8 r5g6b5
michael@0 1178 * pixels packed in a 128-bit register (out). Requires two temporary 128-bit
michael@0 1179 * registers (tmp1, tmp2).
michael@0 1180 */
michael@0 1181 .macro convert_8888_to_0565 in_r, in_g, in_b, out, tmp1, tmp2
michael@0 1182 vshll.u8 tmp1, in_g, #8
michael@0 1183 vshll.u8 out, in_r, #8
michael@0 1184 vshll.u8 tmp2, in_b, #8
michael@0 1185 vsri.u16 out, tmp1, #5
michael@0 1186 vsri.u16 out, tmp2, #11
michael@0 1187 .endm
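/* Added note: the three vshll #8 operations place each 8-bit channel in the
 * top byte of a 16-bit lane; the two vsri operations then shift G and B
 * into place while keeping only the top 5/6/5 bits of R/G/B:
 *     out = rrrrrrrr00000000
 *     vsri #5  (G) -> rrrrrgggggggg000
 *     vsri #11 (B) -> rrrrrggggggbbbbb  (G truncated to 6 bits, B to 5) */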
michael@0 1188
michael@0 1189 /*
michael@0 1190 * Conversion of four r5g6b5 pixels (in) to four x8r8g8b8 pixels
michael@0 1191 * returned in the (out0, out1) register pair. Requires one temporary
michael@0 1192 * 64-bit register (tmp). 'out1' and 'in' may overlap; the original
michael@0 1193 * value of 'in' is lost.
michael@0 1194 */
michael@0 1195 .macro convert_four_0565_to_x888_packed in, out0, out1, tmp
michael@0 1196 vshl.u16 out0, in, #5 /* G top 6 bits */
michael@0 1197 vshl.u16 tmp, in, #11 /* B top 5 bits */
michael@0 1198 vsri.u16 in, in, #5 /* R is ready in top bits */
michael@0 1199 vsri.u16 out0, out0, #6 /* G is ready in top bits */
michael@0 1200 vsri.u16 tmp, tmp, #5 /* B is ready in top bits */
michael@0 1201 vshr.u16 out1, in, #8 /* R is in place */
michael@0 1202 vsri.u16 out0, tmp, #8 /* G & B is in place */
michael@0 1203 vzip.u16 out0, out1 /* everything is in place */
michael@0 1204 .endm
