gfx/cairo/libpixman/src/pixman-mips-dspr2-asm.h

/*
 * Copyright (c) 2012
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Nemanja Lukic (nlukic@mips.com)
 */

#ifndef PIXMAN_MIPS_DSPR2_ASM_H
#define PIXMAN_MIPS_DSPR2_ASM_H

#define zero $0
#define AT   $1
#define v0   $2
#define v1   $3
#define a0   $4
#define a1   $5
#define a2   $6
#define a3   $7
#define t0   $8
#define t1   $9
#define t2   $10
#define t3   $11
#define t4   $12
#define t5   $13
#define t6   $14
#define t7   $15
#define s0   $16
#define s1   $17
#define s2   $18
#define s3   $19
#define s4   $20
#define s5   $21
#define s6   $22
#define s7   $23
#define t8   $24
#define t9   $25
#define k0   $26
#define k1   $27
#define gp   $28
#define sp   $29
#define fp   $30
#define s8   $30
#define ra   $31

/*
 * LEAF_MIPS32R2 - declare leaf routine for MIPS32r2
 */
#define LEAF_MIPS32R2(symbol) \
        .globl symbol; \
        .align 2; \
        .type symbol, @function; \
        .ent symbol, 0; \
symbol: .frame sp, 0, ra; \
        .set push; \
        .set arch=mips32r2; \
        .set noreorder; \
        .set noat;

/*
 * LEAF_MIPS_DSPR2 - declare leaf routine for MIPS DSPr2
 */
#define LEAF_MIPS_DSPR2(symbol) \
        LEAF_MIPS32R2(symbol) \
        .set dspr2;

/*
 * END - mark end of function
 */
#define END(function) \
        .set pop; \
        .end function; \
        .size function,.-function

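/*
 * Illustrative sketch (not part of the original header): a minimal leaf
 * routine framed by the macros above. The function name and body are
 * hypothetical; the real DSPr2 fast paths that use these macros live in
 * pixman-mips-dspr2-asm.S.
 *
 *     LEAF_MIPS_DSPR2(pixman_example_return_zero_asm)
 *         li      v0, 0       # return value
 *         jr      ra          # return to caller
 *          nop                # branch delay slot (.set noreorder)
 *     END(pixman_example_return_zero_asm)
 */
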
/*
 * Checks whether the stack offset is big enough for storing/restoring
 * regs_num registers to/from the stack. The offset must cover the bytes
 * needed to store the registers (regs_num * 4), minus the 16 bytes of the
 * caller-provided argument area that the MIPS o32 ABI reserves for a0-a3;
 * reusing that area allows the stack frame to be kept smaller.
 */
.macro CHECK_STACK_OFFSET regs_num, stack_offset
    .if \stack_offset < \regs_num * 4 - 16
        .error "Stack offset too small."
    .endif
.endm

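/*
 * Worked example (illustrative only): saving 6 registers needs 6 * 4 = 24
 * bytes, of which the first 16 can live in the caller's argument area, so
 * CHECK_STACK_OFFSET 6, 8 passes (8 >= 24 - 16), while
 * CHECK_STACK_OFFSET 6, 4 triggers the .error directive.
 */
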
/*
 * Saves a set of registers on the stack. The maximum number of registers
 * that can be saved on the stack is limited to 14 (a0-a3, v0-v1 and s0-s7).
 * Stack offset is the number of bytes by which the stack pointer (sp) is
 * decremented before the registers are pushed, in order to provide enough
 * space on the stack (the offset must be a multiple of 4 and must be big
 * enough, as described by the CHECK_STACK_OFFSET macro). This macro is
 * intended to be used in combination with the RESTORE_REGS_FROM_STACK
 * macro. Example:
 *  SAVE_REGS_ON_STACK      4, v0, v1, s0, s1
 *  RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1
 */
.macro SAVE_REGS_ON_STACK stack_offset = 0, r1, \
                          r2 = 0, r3 = 0, r4 = 0, \
                          r5 = 0, r6 = 0, r7 = 0, \
                          r8 = 0, r9 = 0, r10 = 0, \
                          r11 = 0, r12 = 0, r13 = 0, \
                          r14 = 0
    .if (\stack_offset < 0) || (\stack_offset - (\stack_offset / 4) * 4)
        .error "Stack offset must be positive and a multiple of 4."
    .endif
    .if \stack_offset != 0
        addiu   sp, sp, -\stack_offset
    .endif
    sw      \r1, 0(sp)
    .if \r2 != 0
        sw      \r2, 4(sp)
    .endif
    .if \r3 != 0
        sw      \r3, 8(sp)
    .endif
    .if \r4 != 0
        sw      \r4, 12(sp)
    .endif
    .if \r5 != 0
        CHECK_STACK_OFFSET 5, \stack_offset
        sw      \r5, 16(sp)
    .endif
    .if \r6 != 0
        CHECK_STACK_OFFSET 6, \stack_offset
        sw      \r6, 20(sp)
    .endif
    .if \r7 != 0
        CHECK_STACK_OFFSET 7, \stack_offset
        sw      \r7, 24(sp)
    .endif
    .if \r8 != 0
        CHECK_STACK_OFFSET 8, \stack_offset
        sw      \r8, 28(sp)
    .endif
    .if \r9 != 0
        CHECK_STACK_OFFSET 9, \stack_offset
        sw      \r9, 32(sp)
    .endif
    .if \r10 != 0
        CHECK_STACK_OFFSET 10, \stack_offset
        sw      \r10, 36(sp)
    .endif
    .if \r11 != 0
        CHECK_STACK_OFFSET 11, \stack_offset
        sw      \r11, 40(sp)
    .endif
    .if \r12 != 0
        CHECK_STACK_OFFSET 12, \stack_offset
        sw      \r12, 44(sp)
    .endif
    .if \r13 != 0
        CHECK_STACK_OFFSET 13, \stack_offset
        sw      \r13, 48(sp)
    .endif
    .if \r14 != 0
        CHECK_STACK_OFFSET 14, \stack_offset
        sw      \r14, 52(sp)
    .endif
.endm

/*
 * Restores a set of registers from the stack. The maximum number of
 * registers that can be restored from the stack is limited to 14 (a0-a3,
 * v0-v1 and s0-s7). Stack offset is the number of bytes that are added
 * back to the stack pointer (sp) after the registers have been restored
 * (the offset must be a multiple of 4 and must be big enough, as described
 * by the CHECK_STACK_OFFSET macro). This macro is intended to be used in
 * combination with the SAVE_REGS_ON_STACK macro. Example:
 *  SAVE_REGS_ON_STACK      4, v0, v1, s0, s1
 *  RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1
 */
.macro RESTORE_REGS_FROM_STACK stack_offset = 0, r1, \
                               r2 = 0, r3 = 0, r4 = 0, \
                               r5 = 0, r6 = 0, r7 = 0, \
                               r8 = 0, r9 = 0, r10 = 0, \
                               r11 = 0, r12 = 0, r13 = 0, \
                               r14 = 0
    .if (\stack_offset < 0) || (\stack_offset - (\stack_offset / 4) * 4)
        .error "Stack offset must be positive and a multiple of 4."
    .endif
    lw      \r1, 0(sp)
    .if \r2 != 0
        lw      \r2, 4(sp)
    .endif
    .if \r3 != 0
        lw      \r3, 8(sp)
    .endif
    .if \r4 != 0
        lw      \r4, 12(sp)
    .endif
    .if \r5 != 0
        CHECK_STACK_OFFSET 5, \stack_offset
        lw      \r5, 16(sp)
    .endif
    .if \r6 != 0
        CHECK_STACK_OFFSET 6, \stack_offset
        lw      \r6, 20(sp)
    .endif
    .if \r7 != 0
        CHECK_STACK_OFFSET 7, \stack_offset
        lw      \r7, 24(sp)
    .endif
    .if \r8 != 0
        CHECK_STACK_OFFSET 8, \stack_offset
        lw      \r8, 28(sp)
    .endif
    .if \r9 != 0
        CHECK_STACK_OFFSET 9, \stack_offset
        lw      \r9, 32(sp)
    .endif
    .if \r10 != 0
        CHECK_STACK_OFFSET 10, \stack_offset
        lw      \r10, 36(sp)
    .endif
    .if \r11 != 0
        CHECK_STACK_OFFSET 11, \stack_offset
        lw      \r11, 40(sp)
    .endif
    .if \r12 != 0
        CHECK_STACK_OFFSET 12, \stack_offset
        lw      \r12, 44(sp)
    .endif
    .if \r13 != 0
        CHECK_STACK_OFFSET 13, \stack_offset
        lw      \r13, 48(sp)
    .endif
    .if \r14 != 0
        CHECK_STACK_OFFSET 14, \stack_offset
        lw      \r14, 52(sp)
    .endif
    .if \stack_offset != 0
        addiu   sp, sp, \stack_offset
    .endif
.endm

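/*
 * Illustrative sketch (not from the original file): saving six registers
 * while re-using the 16-byte o32 argument area, so only 8 extra bytes of
 * stack are needed (6 * 4 - 16 = 8). The surrounding function name is
 * hypothetical.
 *
 *     LEAF_MIPS_DSPR2(pixman_example_composite_asm)
 *         SAVE_REGS_ON_STACK      8, v0, v1, s0, s1, s2, s3
 *         ...                     # body clobbering s0-s3
 *         RESTORE_REGS_FROM_STACK 8, v0, v1, s0, s1, s2, s3
 *         jr      ra
 *          nop
 *     END(pixman_example_composite_asm)
 */
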
/*
 * Conversion of single r5g6b5 pixel (in_565) to single a8r8g8b8 pixel
 * returned in (out_8888) register. Requires two temporary registers
 * (scratch1 and scratch2).
 */
.macro CONVERT_1x0565_TO_1x8888 in_565, \
                                out_8888, \
                                scratch1, scratch2
    lui     \out_8888, 0xff00
    sll     \scratch1, \in_565, 0x3
    andi    \scratch2, \scratch1, 0xff
    ext     \scratch1, \in_565, 0x2, 0x3
    or      \scratch1, \scratch2, \scratch1
    or      \out_8888, \out_8888, \scratch1

    sll     \scratch1, \in_565, 0x5
    andi    \scratch1, \scratch1, 0xfc00
    srl     \scratch2, \in_565, 0x1
    andi    \scratch2, \scratch2, 0x300
    or      \scratch2, \scratch1, \scratch2
    or      \out_8888, \out_8888, \scratch2

    andi    \scratch1, \in_565, 0xf800
    srl     \scratch2, \scratch1, 0x5
    andi    \scratch2, \scratch2, 0xff00
    or      \scratch1, \scratch1, \scratch2
    sll     \scratch1, \scratch1, 0x8
    or      \out_8888, \out_8888, \scratch1
.endm

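/*
 * Worked examples of the expansion above (alpha is forced to 0xff and each
 * 5/6-bit component is widened by replicating its top bits into the low
 * bits, so full intensity maps to full intensity):
 *   0x001f (pure blue,  B5 = 31) -> 0xff0000ff
 *   0x07e0 (pure green, G6 = 63) -> 0xff00ff00
 *   0xf800 (pure red,   R5 = 31) -> 0xffff0000
 */
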
/*
 * Conversion of two r5g6b5 pixels (in1_565 and in2_565) to two a8r8g8b8 pixels
 * returned in (out1_8888 and out2_8888) registers. Requires four scratch
 * registers (scratch1 ... scratch4). It also requires maskG and maskB for
 * color component extraction. These masks must have the following values:
 *     li maskG, 0x07e007e0
 *     li maskB, 0x001F001F
 */
.macro CONVERT_2x0565_TO_2x8888 in1_565, in2_565, \
                                out1_8888, out2_8888, \
                                maskG, maskB, \
                                scratch1, scratch2, scratch3, scratch4
    sll     \scratch1, \in1_565, 16
    or      \scratch1, \scratch1, \in2_565
    lui     \out2_8888, 0xff00
    ori     \out2_8888, \out2_8888, 0xff00
    shrl.ph \scratch2, \scratch1, 11
    and     \scratch3, \scratch1, \maskG
    shra.ph \scratch4, \scratch2, 2
    shll.ph \scratch2, \scratch2, 3
    shll.ph \scratch3, \scratch3, 5
    or      \scratch2, \scratch2, \scratch4
    shrl.qb \scratch4, \scratch3, 6
    or      \out2_8888, \out2_8888, \scratch2
    or      \scratch3, \scratch3, \scratch4
    and     \scratch1, \scratch1, \maskB
    shll.ph \scratch2, \scratch1, 3
    shra.ph \scratch4, \scratch1, 2
    or      \scratch2, \scratch2, \scratch4
    or      \scratch3, \scratch2, \scratch3
    precrq.ph.w    \out1_8888, \out2_8888, \scratch3
    precr_sra.ph.w \out2_8888, \scratch3, 0
.endm

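/*
 * Illustrative usage sketch (register choices are hypothetical):
 *
 *     li      s0, 0x07e007e0          # maskG
 *     li      s1, 0x001f001f          # maskB
 *     lhu     t0, 0(a1)               # first r5g6b5 pixel
 *     lhu     t1, 2(a1)               # second r5g6b5 pixel
 *     CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, s0, s1, t4, t5, t6, t7
 *     sw      t2, 0(a0)               # first a8r8g8b8 pixel
 *     sw      t3, 4(a0)               # second a8r8g8b8 pixel
 */
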
/*
 * Conversion of single a8r8g8b8 pixel (in_8888) to single r5g6b5 pixel
 * returned in (out_565) register. Requires two temporary registers
 * (scratch1 and scratch2).
 */
.macro CONVERT_1x8888_TO_1x0565 in_8888, \
                                out_565, \
                                scratch1, scratch2
    ext     \out_565, \in_8888, 0x3, 0x5
    srl     \scratch1, \in_8888, 0x5
    andi    \scratch1, \scratch1, 0x07e0
    srl     \scratch2, \in_8888, 0x8
    andi    \scratch2, \scratch2, 0xf800
    or      \out_565, \out_565, \scratch1
    or      \out_565, \out_565, \scratch2
.endm

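/*
 * Worked examples of the truncation above (the low 3/2/3 bits of each
 * component are simply dropped and alpha is ignored):
 *   0xff0000ff (blue)  -> 0x001f
 *   0xff00ff00 (green) -> 0x07e0
 *   0xffffffff (white) -> 0xffff
 */
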
/*
 * Conversion of two a8r8g8b8 pixels (in1_8888 and in2_8888) to two r5g6b5
 * pixels returned in (out1_565 and out2_565) registers. Requires two temporary
 * registers (scratch1 and scratch2). It also requires maskR, maskG and maskB
 * for color component extraction. These masks must have the following values:
 *     li maskR, 0xf800f800
 *     li maskG, 0x07e007e0
 *     li maskB, 0x001F001F
 * The value of the input register in2_8888 is clobbered.
 */
.macro CONVERT_2x8888_TO_2x0565 in1_8888, in2_8888, \
                                out1_565, out2_565, \
                                maskR, maskG, maskB, \
                                scratch1, scratch2
    precrq.ph.w    \scratch1, \in2_8888, \in1_8888
    precr_sra.ph.w \in2_8888, \in1_8888, 0
    shll.ph \scratch1, \scratch1, 8
    srl     \in2_8888, \in2_8888, 3
    and     \scratch2, \in2_8888, \maskB
    and     \scratch1, \scratch1, \maskR
    srl     \in2_8888, \in2_8888, 2
    and     \out2_565, \in2_8888, \maskG
    or      \out2_565, \out2_565, \scratch2
    or      \out1_565, \out2_565, \scratch1
    srl     \out2_565, \out1_565, 16
.endm

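/*
 * Note on the result layout (derived from the instruction sequence above):
 * on exit out1_565 holds the first pixel in its low halfword (with the
 * second pixel still packed in its high halfword), and out2_565 holds the
 * second pixel in its low halfword, so both results can be stored with two
 * sh instructions.
 */
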
/*
 * Multiply a single a8 pixel (m_8) with a single a8r8g8b8 pixel (s_8888).
 * It requires maskLSR for the rounding process; maskLSR must have the
 * following value:
 *     li maskLSR, 0x00ff00ff
 */
.macro MIPS_UN8x4_MUL_UN8 s_8888, \
                          m_8, \
                          d_8888, \
                          maskLSR, \
                          scratch1, scratch2, scratch3
    replv.ph        \m_8, \m_8                       /*  0 | M | 0 | M  */
    muleu_s.ph.qbl  \scratch1, \s_8888, \m_8         /*  A*M | R*M      */
    muleu_s.ph.qbr  \scratch2, \s_8888, \m_8         /*  G*M | B*M      */
    shra_r.ph       \scratch3, \scratch1, 8
    shra_r.ph       \d_8888, \scratch2, 8
    and             \scratch3, \scratch3, \maskLSR   /*  0 |A*M| 0 |R*M  */
    and             \d_8888, \d_8888, \maskLSR       /*  0 |G*M| 0 |B*M  */
    addq.ph         \scratch1, \scratch1, \scratch3  /*  A*M+A*M | R*M+R*M  */
    addq.ph         \scratch2, \scratch2, \d_8888    /*  G*M+G*M | B*M+B*M  */
    shra_r.ph       \scratch1, \scratch1, 8
    shra_r.ph       \scratch2, \scratch2, 8
    precr.qb.ph     \d_8888, \scratch1, \scratch2
.endm

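/*
 * Arithmetic note (derived from the instruction sequence above): for each
 * 8-bit channel x the macro computes the usual pixman approximation of
 * x * m / 255 with rounding,
 *     t = x * m;  result = (t + ((t + 128) >> 8) + 128) >> 8
 * e.g. x = 255, m = 255: t = 65025, result = (65025 + 254 + 128) >> 8 = 255,
 * so multiplying by a fully opaque mask leaves the channel unchanged.
 */
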
/*
 * Multiply two a8 pixels (m1_8 and m2_8) with two a8r8g8b8 pixels (s1_8888
 * and s2_8888). It requires maskLSR for the rounding process; maskLSR must
 * have the following value:
 *     li maskLSR, 0x00ff00ff
 */
.macro MIPS_2xUN8x4_MUL_2xUN8 s1_8888, \
                              s2_8888, \
                              m1_8, \
                              m2_8, \
                              d1_8888, \
                              d2_8888, \
                              maskLSR, \
                              scratch1, scratch2, scratch3, \
                              scratch4, scratch5, scratch6
    replv.ph        \m1_8, \m1_8                    /*  0 | M1 | 0 | M1  */
    replv.ph        \m2_8, \m2_8                    /*  0 | M2 | 0 | M2  */
    muleu_s.ph.qbl  \scratch1, \s1_8888, \m1_8      /*  A1*M1 | R1*M1    */
    muleu_s.ph.qbr  \scratch2, \s1_8888, \m1_8      /*  G1*M1 | B1*M1    */
    muleu_s.ph.qbl  \scratch3, \s2_8888, \m2_8      /*  A2*M2 | R2*M2    */
    muleu_s.ph.qbr  \scratch4, \s2_8888, \m2_8      /*  G2*M2 | B2*M2    */
    shra_r.ph       \scratch5, \scratch1, 8
    shra_r.ph       \d1_8888, \scratch2, 8
    shra_r.ph       \scratch6, \scratch3, 8
    shra_r.ph       \d2_8888, \scratch4, 8
    and             \scratch5, \scratch5, \maskLSR  /*  0 |A1*M1| 0 |R1*M1  */
    and             \d1_8888, \d1_8888, \maskLSR    /*  0 |G1*M1| 0 |B1*M1  */
    and             \scratch6, \scratch6, \maskLSR  /*  0 |A2*M2| 0 |R2*M2  */
    and             \d2_8888, \d2_8888, \maskLSR    /*  0 |G2*M2| 0 |B2*M2  */
    addq.ph         \scratch1, \scratch1, \scratch5
    addq.ph         \scratch2, \scratch2, \d1_8888
    addq.ph         \scratch3, \scratch3, \scratch6
    addq.ph         \scratch4, \scratch4, \d2_8888
    shra_r.ph       \scratch1, \scratch1, 8
    shra_r.ph       \scratch2, \scratch2, 8
    shra_r.ph       \scratch3, \scratch3, 8
    shra_r.ph       \scratch4, \scratch4, 8
    precr.qb.ph     \d1_8888, \scratch1, \scratch2
    precr.qb.ph     \d2_8888, \scratch3, \scratch4
.endm

/*
 * Multiply a single a8r8g8b8 pixel (m_8888) with a single a8r8g8b8 pixel
 * (s_8888), component-wise. It requires maskLSR for the rounding process;
 * maskLSR must have the following value:
 *     li maskLSR, 0x00ff00ff
 */
.macro MIPS_UN8x4_MUL_UN8x4 s_8888, \
                            m_8888, \
                            d_8888, \
                            maskLSR, \
                            scratch1, scratch2, scratch3, scratch4
    preceu.ph.qbl   \scratch1, \m_8888              /*  0 | A | 0 | R  */
    preceu.ph.qbr   \scratch2, \m_8888              /*  0 | G | 0 | B  */
    muleu_s.ph.qbl  \scratch3, \s_8888, \scratch1   /*  A*A | R*R      */
    muleu_s.ph.qbr  \scratch4, \s_8888, \scratch2   /*  G*G | B*B      */
    shra_r.ph       \scratch1, \scratch3, 8
    shra_r.ph       \scratch2, \scratch4, 8
    and             \scratch1, \scratch1, \maskLSR  /*  0 |A*A| 0 |R*R  */
    and             \scratch2, \scratch2, \maskLSR  /*  0 |G*G| 0 |B*B  */
    addq.ph         \scratch1, \scratch1, \scratch3
    addq.ph         \scratch2, \scratch2, \scratch4
    shra_r.ph       \scratch1, \scratch1, 8
    shra_r.ph       \scratch2, \scratch2, 8
    precr.qb.ph     \d_8888, \scratch1, \scratch2
.endm

/*
 * Multiply two a8r8g8b8 pixels (m1_8888 and m2_8888) with two a8r8g8b8
 * pixels (s1_8888 and s2_8888), component-wise. It requires maskLSR for the
 * rounding process; maskLSR must have the following value:
 *     li maskLSR, 0x00ff00ff
 */

.macro MIPS_2xUN8x4_MUL_2xUN8x4 s1_8888, \
                                s2_8888, \
                                m1_8888, \
                                m2_8888, \
                                d1_8888, \
                                d2_8888, \
                                maskLSR, \
                                scratch1, scratch2, scratch3, \
                                scratch4, scratch5, scratch6
    preceu.ph.qbl   \scratch1, \m1_8888              /*  0 | A | 0 | R  */
    preceu.ph.qbr   \scratch2, \m1_8888              /*  0 | G | 0 | B  */
    preceu.ph.qbl   \scratch3, \m2_8888              /*  0 | A | 0 | R  */
    preceu.ph.qbr   \scratch4, \m2_8888              /*  0 | G | 0 | B  */
    muleu_s.ph.qbl  \scratch5, \s1_8888, \scratch1   /*  A*A | R*R      */
    muleu_s.ph.qbr  \scratch6, \s1_8888, \scratch2   /*  G*G | B*B      */
    muleu_s.ph.qbl  \scratch1, \s2_8888, \scratch3   /*  A*A | R*R      */
    muleu_s.ph.qbr  \scratch2, \s2_8888, \scratch4   /*  G*G | B*B      */
    shra_r.ph       \scratch3, \scratch5, 8
    shra_r.ph       \scratch4, \scratch6, 8
    shra_r.ph       \d1_8888, \scratch1, 8
    shra_r.ph       \d2_8888, \scratch2, 8
    and             \scratch3, \scratch3, \maskLSR   /*  0 |A*A| 0 |R*R  */
    and             \scratch4, \scratch4, \maskLSR   /*  0 |G*G| 0 |B*B  */
    and             \d1_8888, \d1_8888, \maskLSR     /*  0 |A*A| 0 |R*R  */
    and             \d2_8888, \d2_8888, \maskLSR     /*  0 |G*G| 0 |B*B  */
    addq.ph         \scratch3, \scratch3, \scratch5
    addq.ph         \scratch4, \scratch4, \scratch6
    addq.ph         \d1_8888, \d1_8888, \scratch1
    addq.ph         \d2_8888, \d2_8888, \scratch2
    shra_r.ph       \scratch3, \scratch3, 8
    shra_r.ph       \scratch4, \scratch4, 8
    shra_r.ph       \scratch5, \d1_8888, 8
    shra_r.ph       \scratch6, \d2_8888, 8
    precr.qb.ph     \d1_8888, \scratch3, \scratch4
    precr.qb.ph     \d2_8888, \scratch5, \scratch6
.endm

/*
 * OVER operation on a single a8r8g8b8 source pixel (s_8888) and a single
 * a8r8g8b8 destination pixel (d_8888) using an a8 mask (m_8). It also
 * requires maskLSR for the rounding process; maskLSR must have the
 * following value:
 *     li maskLSR, 0x00ff00ff
 */
.macro OVER_8888_8_8888 s_8888, \
                        m_8, \
                        d_8888, \
                        out_8888, \
                        maskLSR, \
                        scratch1, scratch2, scratch3, scratch4
    MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \
                       \scratch1, \maskLSR, \
                       \scratch2, \scratch3, \scratch4

    not     \scratch2, \scratch1
    srl     \scratch2, \scratch2, 24

    MIPS_UN8x4_MUL_UN8 \d_8888, \scratch2, \
                       \d_8888, \maskLSR, \
                       \scratch3, \scratch4, \out_8888

    addu_s.qb \out_8888, \d_8888, \scratch1
.endm

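/*
 * Explanatory note (derived from the sequence above): this is the
 * premultiplied-alpha Porter-Duff OVER operator with an a8 mask, i.e. per
 * channel
 *     tmp = s * m / 255
 *     out = tmp + d * (255 - alpha(tmp)) / 255
 * with the final addition done byte-wise and saturating (addu_s.qb).
 */
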
/*
 * OVER operation on two a8r8g8b8 source pixels (s1_8888 and s2_8888) and two
 * a8r8g8b8 destination pixels (d1_8888 and d2_8888) using a8 masks (m1_8 and
 * m2_8). It also requires maskLSR for the rounding process; maskLSR must
 * have the following value:
 *     li maskLSR, 0x00ff00ff
 */
.macro OVER_2x8888_2x8_2x8888 s1_8888, \
                              s2_8888, \
                              m1_8, \
                              m2_8, \
                              d1_8888, \
                              d2_8888, \
                              out1_8888, \
                              out2_8888, \
                              maskLSR, \
                              scratch1, scratch2, scratch3, \
                              scratch4, scratch5, scratch6
    MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \
                           \m1_8, \m2_8, \
                           \scratch1, \scratch2, \
                           \maskLSR, \
                           \scratch3, \scratch4, \out1_8888, \
                           \out2_8888, \scratch5, \scratch6

    not     \scratch3, \scratch1
    srl     \scratch3, \scratch3, 24
    not     \scratch4, \scratch2
    srl     \scratch4, \scratch4, 24

    MIPS_2xUN8x4_MUL_2xUN8 \d1_8888, \d2_8888, \
                           \scratch3, \scratch4, \
                           \d1_8888, \d2_8888, \
                           \maskLSR, \
                           \scratch5, \scratch6, \out1_8888, \
                           \out2_8888, \scratch3, \scratch4

    addu_s.qb \out1_8888, \d1_8888, \scratch1
    addu_s.qb \out2_8888, \d2_8888, \scratch2
.endm

/*
 * OVER operation on a single a8r8g8b8 source pixel (s_8888) and a single
 * a8r8g8b8 destination pixel (d_8888). It also requires maskLSR for the
 * rounding process; maskLSR must have the following value:
 *     li maskLSR, 0x00ff00ff
 */
.macro OVER_8888_8888 s_8888, \
                      d_8888, \
                      out_8888, \
                      maskLSR, \
                      scratch1, scratch2, scratch3, scratch4
    not     \scratch1, \s_8888
    srl     \scratch1, \scratch1, 24

    MIPS_UN8x4_MUL_UN8 \d_8888, \scratch1, \
                       \out_8888, \maskLSR, \
                       \scratch2, \scratch3, \scratch4

    addu_s.qb \out_8888, \out_8888, \s_8888
.endm

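/*
 * Multiply single a8r8g8b8 pixel (s_8888) with single a8 pixel (m_8) and add
 * the a8r8g8b8 destination pixel (d_8888) to the result using byte-wise
 * saturating addition. The result is returned in out_8888. It requires
 * maskLSR for the rounding process; maskLSR must have the following value:
 *     li maskLSR, 0x00ff00ff
 */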
.macro MIPS_UN8x4_MUL_UN8_ADD_UN8x4 s_8888, \
                                    m_8, \
                                    d_8888, \
                                    out_8888, \
                                    maskLSR, \
                                    scratch1, scratch2, scratch3
    MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \
                       \out_8888, \maskLSR, \
                       \scratch1, \scratch2, \scratch3

    addu_s.qb \out_8888, \out_8888, \d_8888
.endm

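/*
 * Multiply two a8r8g8b8 pixels (s1_8888 and s2_8888) with two a8 pixels
 * (m1_8 and m2_8) and add the a8r8g8b8 destination pixels (d1_8888 and
 * d2_8888) to the results using byte-wise saturating addition. The results
 * are returned in out1_8888 and out2_8888. It requires maskLSR for the
 * rounding process; maskLSR must have the following value:
 *     li maskLSR, 0x00ff00ff
 */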
.macro MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 s1_8888, \
                                          s2_8888, \
                                          m1_8, \
                                          m2_8, \
                                          d1_8888, \
                                          d2_8888, \
                                          out1_8888, \
                                          out2_8888, \
                                          maskLSR, \
                                          scratch1, scratch2, scratch3, \
                                          scratch4, scratch5, scratch6
    MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \
                           \m1_8, \m2_8, \
                           \out1_8888, \out2_8888, \
                           \maskLSR, \
                           \scratch1, \scratch2, \scratch3, \
                           \scratch4, \scratch5, \scratch6

    addu_s.qb \out1_8888, \out1_8888, \d1_8888
    addu_s.qb \out2_8888, \out2_8888, \d2_8888
.endm

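/*
 * Bilinear interpolation of a single a8r8g8b8 pixel from its four
 * neighbours tl, tr, bl, br (top-left, top-right, bottom-left,
 * bottom-right) using the weights wt1, wt2, wb1, wb2. Each channel is
 * accumulated in one of the DSP accumulators $ac0-$ac3 and bits 23:16 of
 * each 32-bit sum (i.e. sum >> 16) are packed back together, so the four
 * weights are assumed to sum to 65536 (1.0 in the caller's fixed-point
 * convention). The result is returned in tl; the registers named scratch1,
 * scratch2, alpha, red, green and blue are all used as temporaries and are
 * clobbered.
 */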
.macro BILINEAR_INTERPOLATE_SINGLE_PIXEL tl, tr, bl, br, \
                                         scratch1, scratch2, \
                                         alpha, red, green, blue \
                                         wt1, wt2, wb1, wb2
    andi    \scratch1, \tl, 0xff
    andi    \scratch2, \tr, 0xff
    andi    \alpha, \bl, 0xff
    andi    \red, \br, 0xff

    multu   $ac0, \wt1, \scratch1
    maddu   $ac0, \wt2, \scratch2
    maddu   $ac0, \wb1, \alpha
    maddu   $ac0, \wb2, \red

    ext     \scratch1, \tl, 8, 8
    ext     \scratch2, \tr, 8, 8
    ext     \alpha, \bl, 8, 8
    ext     \red, \br, 8, 8

    multu   $ac1, \wt1, \scratch1
    maddu   $ac1, \wt2, \scratch2
    maddu   $ac1, \wb1, \alpha
    maddu   $ac1, \wb2, \red

    ext     \scratch1, \tl, 16, 8
    ext     \scratch2, \tr, 16, 8
    ext     \alpha, \bl, 16, 8
    ext     \red, \br, 16, 8

    mflo    \blue, $ac0

    multu   $ac2, \wt1, \scratch1
    maddu   $ac2, \wt2, \scratch2
    maddu   $ac2, \wb1, \alpha
    maddu   $ac2, \wb2, \red

    ext     \scratch1, \tl, 24, 8
    ext     \scratch2, \tr, 24, 8
    ext     \alpha, \bl, 24, 8
    ext     \red, \br, 24, 8

    mflo    \green, $ac1

    multu   $ac3, \wt1, \scratch1
    maddu   $ac3, \wt2, \scratch2
    maddu   $ac3, \wb1, \alpha
    maddu   $ac3, \wb2, \red

    mflo    \red, $ac2
    mflo    \alpha, $ac3

    precr.qb.ph  \alpha, \alpha, \red
    precr.qb.ph  \scratch1, \green, \blue
    precrq.qb.ph \tl, \alpha, \scratch1
.endm

#endif //PIXMAN_MIPS_DSPR2_ASM_H
