/*
 * Copyright (c) 2012
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
michael@0: * michael@0: * Author: Nemanja Lukic (nlukic@mips.com) michael@0: */ michael@0: michael@0: #ifndef PIXMAN_MIPS_DSPR2_ASM_H michael@0: #define PIXMAN_MIPS_DSPR2_ASM_H michael@0: michael@0: #define zero $0 michael@0: #define AT $1 michael@0: #define v0 $2 michael@0: #define v1 $3 michael@0: #define a0 $4 michael@0: #define a1 $5 michael@0: #define a2 $6 michael@0: #define a3 $7 michael@0: #define t0 $8 michael@0: #define t1 $9 michael@0: #define t2 $10 michael@0: #define t3 $11 michael@0: #define t4 $12 michael@0: #define t5 $13 michael@0: #define t6 $14 michael@0: #define t7 $15 michael@0: #define s0 $16 michael@0: #define s1 $17 michael@0: #define s2 $18 michael@0: #define s3 $19 michael@0: #define s4 $20 michael@0: #define s5 $21 michael@0: #define s6 $22 michael@0: #define s7 $23 michael@0: #define t8 $24 michael@0: #define t9 $25 michael@0: #define k0 $26 michael@0: #define k1 $27 michael@0: #define gp $28 michael@0: #define sp $29 michael@0: #define fp $30 michael@0: #define s8 $30 michael@0: #define ra $31 michael@0: michael@0: /* michael@0: * LEAF_MIPS32R2 - declare leaf routine for MIPS32r2 michael@0: */ michael@0: #define LEAF_MIPS32R2(symbol) \ michael@0: .globl symbol; \ michael@0: .align 2; \ michael@0: .type symbol, @function; \ michael@0: .ent symbol, 0; \ michael@0: symbol: .frame sp, 0, ra; \ michael@0: .set push; \ michael@0: .set arch=mips32r2; \ michael@0: .set noreorder; \ michael@0: .set noat; michael@0: michael@0: /* michael@0: * LEAF_MIPS32R2 - declare leaf routine for MIPS DSPr2 michael@0: */ michael@0: #define LEAF_MIPS_DSPR2(symbol) \ michael@0: LEAF_MIPS32R2(symbol) \ michael@0: .set dspr2; michael@0: michael@0: /* michael@0: * END - mark end of function michael@0: */ michael@0: #define END(function) \ michael@0: .set pop; \ michael@0: .end function; \ michael@0: .size function,.-function michael@0: michael@0: /* michael@0: * Checks if stack offset is big enough for storing/restoring regs_num michael@0: * number of register 
to/from stack. Stack offset must be greater than michael@0: * or equal to the number of bytes needed for storing registers (regs_num*4). michael@0: * Since MIPS ABI allows usage of first 16 bytes of stack frame (this is michael@0: * preserved for input arguments of the functions, already stored in a0-a3), michael@0: * stack size can be further optimized by utilizing this space. michael@0: */ michael@0: .macro CHECK_STACK_OFFSET regs_num, stack_offset michael@0: .if \stack_offset < \regs_num * 4 - 16 michael@0: .error "Stack offset too small." michael@0: .endif michael@0: .endm michael@0: michael@0: /* michael@0: * Saves set of registers on stack. Maximum number of registers that michael@0: * can be saved on stack is limitted to 14 (a0-a3, v0-v1 and s0-s7). michael@0: * Stack offset is number of bytes that are added to stack pointer (sp) michael@0: * before registers are pushed in order to provide enough space on stack michael@0: * (offset must be multiple of 4, and must be big enough, as described by michael@0: * CHECK_STACK_OFFSET macro). This macro is intended to be used in michael@0: * combination with RESTORE_REGS_FROM_STACK macro. Example: michael@0: * SAVE_REGS_ON_STACK 4, v0, v1, s0, s1 michael@0: * RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1 michael@0: */ michael@0: .macro SAVE_REGS_ON_STACK stack_offset = 0, r1, \ michael@0: r2 = 0, r3 = 0, r4 = 0, \ michael@0: r5 = 0, r6 = 0, r7 = 0, \ michael@0: r8 = 0, r9 = 0, r10 = 0, \ michael@0: r11 = 0, r12 = 0, r13 = 0, \ michael@0: r14 = 0 michael@0: .if (\stack_offset < 0) || (\stack_offset - (\stack_offset / 4) * 4) michael@0: .error "Stack offset must be pozitive and multiple of 4." 
michael@0: .endif michael@0: .if \stack_offset != 0 michael@0: addiu sp, sp, -\stack_offset michael@0: .endif michael@0: sw \r1, 0(sp) michael@0: .if \r2 != 0 michael@0: sw \r2, 4(sp) michael@0: .endif michael@0: .if \r3 != 0 michael@0: sw \r3, 8(sp) michael@0: .endif michael@0: .if \r4 != 0 michael@0: sw \r4, 12(sp) michael@0: .endif michael@0: .if \r5 != 0 michael@0: CHECK_STACK_OFFSET 5, \stack_offset michael@0: sw \r5, 16(sp) michael@0: .endif michael@0: .if \r6 != 0 michael@0: CHECK_STACK_OFFSET 6, \stack_offset michael@0: sw \r6, 20(sp) michael@0: .endif michael@0: .if \r7 != 0 michael@0: CHECK_STACK_OFFSET 7, \stack_offset michael@0: sw \r7, 24(sp) michael@0: .endif michael@0: .if \r8 != 0 michael@0: CHECK_STACK_OFFSET 8, \stack_offset michael@0: sw \r8, 28(sp) michael@0: .endif michael@0: .if \r9 != 0 michael@0: CHECK_STACK_OFFSET 9, \stack_offset michael@0: sw \r9, 32(sp) michael@0: .endif michael@0: .if \r10 != 0 michael@0: CHECK_STACK_OFFSET 10, \stack_offset michael@0: sw \r10, 36(sp) michael@0: .endif michael@0: .if \r11 != 0 michael@0: CHECK_STACK_OFFSET 11, \stack_offset michael@0: sw \r11, 40(sp) michael@0: .endif michael@0: .if \r12 != 0 michael@0: CHECK_STACK_OFFSET 12, \stack_offset michael@0: sw \r12, 44(sp) michael@0: .endif michael@0: .if \r13 != 0 michael@0: CHECK_STACK_OFFSET 13, \stack_offset michael@0: sw \r13, 48(sp) michael@0: .endif michael@0: .if \r14 != 0 michael@0: CHECK_STACK_OFFSET 14, \stack_offset michael@0: sw \r14, 52(sp) michael@0: .endif michael@0: .endm michael@0: michael@0: /* michael@0: * Restores set of registers from stack. Maximum number of registers that michael@0: * can be restored from stack is limitted to 14 (a0-a3, v0-v1 and s0-s7). michael@0: * Stack offset is number of bytes that are added to stack pointer (sp) michael@0: * after registers are restored (offset must be multiple of 4, and must michael@0: * be big enough, as described by CHECK_STACK_OFFSET macro). 
This macro is michael@0: * intended to be used in combination with RESTORE_REGS_FROM_STACK macro. michael@0: * Example: michael@0: * SAVE_REGS_ON_STACK 4, v0, v1, s0, s1 michael@0: * RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1 michael@0: */ michael@0: .macro RESTORE_REGS_FROM_STACK stack_offset = 0, r1, \ michael@0: r2 = 0, r3 = 0, r4 = 0, \ michael@0: r5 = 0, r6 = 0, r7 = 0, \ michael@0: r8 = 0, r9 = 0, r10 = 0, \ michael@0: r11 = 0, r12 = 0, r13 = 0, \ michael@0: r14 = 0 michael@0: .if (\stack_offset < 0) || (\stack_offset - (\stack_offset/4)*4) michael@0: .error "Stack offset must be pozitive and multiple of 4." michael@0: .endif michael@0: lw \r1, 0(sp) michael@0: .if \r2 != 0 michael@0: lw \r2, 4(sp) michael@0: .endif michael@0: .if \r3 != 0 michael@0: lw \r3, 8(sp) michael@0: .endif michael@0: .if \r4 != 0 michael@0: lw \r4, 12(sp) michael@0: .endif michael@0: .if \r5 != 0 michael@0: CHECK_STACK_OFFSET 5, \stack_offset michael@0: lw \r5, 16(sp) michael@0: .endif michael@0: .if \r6 != 0 michael@0: CHECK_STACK_OFFSET 6, \stack_offset michael@0: lw \r6, 20(sp) michael@0: .endif michael@0: .if \r7 != 0 michael@0: CHECK_STACK_OFFSET 7, \stack_offset michael@0: lw \r7, 24(sp) michael@0: .endif michael@0: .if \r8 != 0 michael@0: CHECK_STACK_OFFSET 8, \stack_offset michael@0: lw \r8, 28(sp) michael@0: .endif michael@0: .if \r9 != 0 michael@0: CHECK_STACK_OFFSET 9, \stack_offset michael@0: lw \r9, 32(sp) michael@0: .endif michael@0: .if \r10 != 0 michael@0: CHECK_STACK_OFFSET 10, \stack_offset michael@0: lw \r10, 36(sp) michael@0: .endif michael@0: .if \r11 != 0 michael@0: CHECK_STACK_OFFSET 11, \stack_offset michael@0: lw \r11, 40(sp) michael@0: .endif michael@0: .if \r12 != 0 michael@0: CHECK_STACK_OFFSET 12, \stack_offset michael@0: lw \r12, 44(sp) michael@0: .endif michael@0: .if \r13 != 0 michael@0: CHECK_STACK_OFFSET 13, \stack_offset michael@0: lw \r13, 48(sp) michael@0: .endif michael@0: .if \r14 != 0 michael@0: CHECK_STACK_OFFSET 14, \stack_offset michael@0: lw 
\r14, 52(sp) michael@0: .endif michael@0: .if \stack_offset != 0 michael@0: addiu sp, sp, \stack_offset michael@0: .endif michael@0: .endm michael@0: michael@0: /* michael@0: * Conversion of single r5g6b5 pixel (in_565) to single a8r8g8b8 pixel michael@0: * returned in (out_8888) register. Requires two temporary registers michael@0: * (scratch1 and scratch2). michael@0: */ michael@0: .macro CONVERT_1x0565_TO_1x8888 in_565, \ michael@0: out_8888, \ michael@0: scratch1, scratch2 michael@0: lui \out_8888, 0xff00 michael@0: sll \scratch1, \in_565, 0x3 michael@0: andi \scratch2, \scratch1, 0xff michael@0: ext \scratch1, \in_565, 0x2, 0x3 michael@0: or \scratch1, \scratch2, \scratch1 michael@0: or \out_8888, \out_8888, \scratch1 michael@0: michael@0: sll \scratch1, \in_565, 0x5 michael@0: andi \scratch1, \scratch1, 0xfc00 michael@0: srl \scratch2, \in_565, 0x1 michael@0: andi \scratch2, \scratch2, 0x300 michael@0: or \scratch2, \scratch1, \scratch2 michael@0: or \out_8888, \out_8888, \scratch2 michael@0: michael@0: andi \scratch1, \in_565, 0xf800 michael@0: srl \scratch2, \scratch1, 0x5 michael@0: andi \scratch2, \scratch2, 0xff00 michael@0: or \scratch1, \scratch1, \scratch2 michael@0: sll \scratch1, \scratch1, 0x8 michael@0: or \out_8888, \out_8888, \scratch1 michael@0: .endm michael@0: michael@0: /* michael@0: * Conversion of two r5g6b5 pixels (in1_565 and in2_565) to two a8r8g8b8 pixels michael@0: * returned in (out1_8888 and out2_8888) registers. Requires four scratch michael@0: * registers (scratch1 ... scratch4). It also requires maskG and maskB for michael@0: * color component extractions. 
These masks must have following values: michael@0: * li maskG, 0x07e007e0 michael@0: * li maskB, 0x001F001F michael@0: */ michael@0: .macro CONVERT_2x0565_TO_2x8888 in1_565, in2_565, \ michael@0: out1_8888, out2_8888, \ michael@0: maskG, maskB, \ michael@0: scratch1, scratch2, scratch3, scratch4 michael@0: sll \scratch1, \in1_565, 16 michael@0: or \scratch1, \scratch1, \in2_565 michael@0: lui \out2_8888, 0xff00 michael@0: ori \out2_8888, \out2_8888, 0xff00 michael@0: shrl.ph \scratch2, \scratch1, 11 michael@0: and \scratch3, \scratch1, \maskG michael@0: shra.ph \scratch4, \scratch2, 2 michael@0: shll.ph \scratch2, \scratch2, 3 michael@0: shll.ph \scratch3, \scratch3, 5 michael@0: or \scratch2, \scratch2, \scratch4 michael@0: shrl.qb \scratch4, \scratch3, 6 michael@0: or \out2_8888, \out2_8888, \scratch2 michael@0: or \scratch3, \scratch3, \scratch4 michael@0: and \scratch1, \scratch1, \maskB michael@0: shll.ph \scratch2, \scratch1, 3 michael@0: shra.ph \scratch4, \scratch1, 2 michael@0: or \scratch2, \scratch2, \scratch4 michael@0: or \scratch3, \scratch2, \scratch3 michael@0: precrq.ph.w \out1_8888, \out2_8888, \scratch3 michael@0: precr_sra.ph.w \out2_8888, \scratch3, 0 michael@0: .endm michael@0: michael@0: /* michael@0: * Conversion of single a8r8g8b8 pixel (in_8888) to single r5g6b5 pixel michael@0: * returned in (out_565) register. Requires two temporary registers michael@0: * (scratch1 and scratch2). 
michael@0: */ michael@0: .macro CONVERT_1x8888_TO_1x0565 in_8888, \ michael@0: out_565, \ michael@0: scratch1, scratch2 michael@0: ext \out_565, \in_8888, 0x3, 0x5 michael@0: srl \scratch1, \in_8888, 0x5 michael@0: andi \scratch1, \scratch1, 0x07e0 michael@0: srl \scratch2, \in_8888, 0x8 michael@0: andi \scratch2, \scratch2, 0xf800 michael@0: or \out_565, \out_565, \scratch1 michael@0: or \out_565, \out_565, \scratch2 michael@0: .endm michael@0: michael@0: /* michael@0: * Conversion of two a8r8g8b8 pixels (in1_8888 and in2_8888) to two r5g6b5 michael@0: * pixels returned in (out1_565 and out2_565) registers. Requires two temporary michael@0: * registers (scratch1 and scratch2). It also requires maskR, maskG and maskB michael@0: * for color component extractions. These masks must have following values: michael@0: * li maskR, 0xf800f800 michael@0: * li maskG, 0x07e007e0 michael@0: * li maskB, 0x001F001F michael@0: * Value of input register in2_8888 is lost. michael@0: */ michael@0: .macro CONVERT_2x8888_TO_2x0565 in1_8888, in2_8888, \ michael@0: out1_565, out2_565, \ michael@0: maskR, maskG, maskB, \ michael@0: scratch1, scratch2 michael@0: precrq.ph.w \scratch1, \in2_8888, \in1_8888 michael@0: precr_sra.ph.w \in2_8888, \in1_8888, 0 michael@0: shll.ph \scratch1, \scratch1, 8 michael@0: srl \in2_8888, \in2_8888, 3 michael@0: and \scratch2, \in2_8888, \maskB michael@0: and \scratch1, \scratch1, \maskR michael@0: srl \in2_8888, \in2_8888, 2 michael@0: and \out2_565, \in2_8888, \maskG michael@0: or \out2_565, \out2_565, \scratch2 michael@0: or \out1_565, \out2_565, \scratch1 michael@0: srl \out2_565, \out1_565, 16 michael@0: .endm michael@0: michael@0: /* michael@0: * Multiply pixel (a8) with single pixel (a8r8g8b8). It requires maskLSR needed michael@0: * for rounding process. 
maskLSR must have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: .macro MIPS_UN8x4_MUL_UN8 s_8888, \ michael@0: m_8, \ michael@0: d_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3 michael@0: replv.ph \m_8, \m_8 /* 0 | M | 0 | M */ michael@0: muleu_s.ph.qbl \scratch1, \s_8888, \m_8 /* A*M | R*M */ michael@0: muleu_s.ph.qbr \scratch2, \s_8888, \m_8 /* G*M | B*M */ michael@0: shra_r.ph \scratch3, \scratch1, 8 michael@0: shra_r.ph \d_8888, \scratch2, 8 michael@0: and \scratch3, \scratch3, \maskLSR /* 0 |A*M| 0 |R*M */ michael@0: and \d_8888, \d_8888, \maskLSR /* 0 |G*M| 0 |B*M */ michael@0: addq.ph \scratch1, \scratch1, \scratch3 /* A*M+A*M | R*M+R*M */ michael@0: addq.ph \scratch2, \scratch2, \d_8888 /* G*M+G*M | B*M+B*M */ michael@0: shra_r.ph \scratch1, \scratch1, 8 michael@0: shra_r.ph \scratch2, \scratch2, 8 michael@0: precr.qb.ph \d_8888, \scratch1, \scratch2 michael@0: .endm michael@0: michael@0: /* michael@0: * Multiply two pixels (a8) with two pixels (a8r8g8b8). It requires maskLSR michael@0: * needed for rounding process. 
maskLSR must have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: .macro MIPS_2xUN8x4_MUL_2xUN8 s1_8888, \ michael@0: s2_8888, \ michael@0: m1_8, \ michael@0: m2_8, \ michael@0: d1_8888, \ michael@0: d2_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, \ michael@0: scratch4, scratch5, scratch6 michael@0: replv.ph \m1_8, \m1_8 /* 0 | M1 | 0 | M1 */ michael@0: replv.ph \m2_8, \m2_8 /* 0 | M2 | 0 | M2 */ michael@0: muleu_s.ph.qbl \scratch1, \s1_8888, \m1_8 /* A1*M1 | R1*M1 */ michael@0: muleu_s.ph.qbr \scratch2, \s1_8888, \m1_8 /* G1*M1 | B1*M1 */ michael@0: muleu_s.ph.qbl \scratch3, \s2_8888, \m2_8 /* A2*M2 | R2*M2 */ michael@0: muleu_s.ph.qbr \scratch4, \s2_8888, \m2_8 /* G2*M2 | B2*M2 */ michael@0: shra_r.ph \scratch5, \scratch1, 8 michael@0: shra_r.ph \d1_8888, \scratch2, 8 michael@0: shra_r.ph \scratch6, \scratch3, 8 michael@0: shra_r.ph \d2_8888, \scratch4, 8 michael@0: and \scratch5, \scratch5, \maskLSR /* 0 |A1*M1| 0 |R1*M1 */ michael@0: and \d1_8888, \d1_8888, \maskLSR /* 0 |G1*M1| 0 |B1*M1 */ michael@0: and \scratch6, \scratch6, \maskLSR /* 0 |A2*M2| 0 |R2*M2 */ michael@0: and \d2_8888, \d2_8888, \maskLSR /* 0 |G2*M2| 0 |B2*M2 */ michael@0: addq.ph \scratch1, \scratch1, \scratch5 michael@0: addq.ph \scratch2, \scratch2, \d1_8888 michael@0: addq.ph \scratch3, \scratch3, \scratch6 michael@0: addq.ph \scratch4, \scratch4, \d2_8888 michael@0: shra_r.ph \scratch1, \scratch1, 8 michael@0: shra_r.ph \scratch2, \scratch2, 8 michael@0: shra_r.ph \scratch3, \scratch3, 8 michael@0: shra_r.ph \scratch4, \scratch4, 8 michael@0: precr.qb.ph \d1_8888, \scratch1, \scratch2 michael@0: precr.qb.ph \d2_8888, \scratch3, \scratch4 michael@0: .endm michael@0: michael@0: /* michael@0: * Multiply pixel (a8r8g8b8) with single pixel (a8r8g8b8). It requires maskLSR michael@0: * needed for rounding process. 
maskLSR must have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: .macro MIPS_UN8x4_MUL_UN8x4 s_8888, \ michael@0: m_8888, \ michael@0: d_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, scratch4 michael@0: preceu.ph.qbl \scratch1, \m_8888 /* 0 | A | 0 | R */ michael@0: preceu.ph.qbr \scratch2, \m_8888 /* 0 | G | 0 | B */ michael@0: muleu_s.ph.qbl \scratch3, \s_8888, \scratch1 /* A*A | R*R */ michael@0: muleu_s.ph.qbr \scratch4, \s_8888, \scratch2 /* G*G | B*B */ michael@0: shra_r.ph \scratch1, \scratch3, 8 michael@0: shra_r.ph \scratch2, \scratch4, 8 michael@0: and \scratch1, \scratch1, \maskLSR /* 0 |A*A| 0 |R*R */ michael@0: and \scratch2, \scratch2, \maskLSR /* 0 |G*G| 0 |B*B */ michael@0: addq.ph \scratch1, \scratch1, \scratch3 michael@0: addq.ph \scratch2, \scratch2, \scratch4 michael@0: shra_r.ph \scratch1, \scratch1, 8 michael@0: shra_r.ph \scratch2, \scratch2, 8 michael@0: precr.qb.ph \d_8888, \scratch1, \scratch2 michael@0: .endm michael@0: michael@0: /* michael@0: * Multiply two pixels (a8r8g8b8) with two pixels (a8r8g8b8). It requires michael@0: * maskLSR needed for rounding process. 
maskLSR must have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: michael@0: .macro MIPS_2xUN8x4_MUL_2xUN8x4 s1_8888, \ michael@0: s2_8888, \ michael@0: m1_8888, \ michael@0: m2_8888, \ michael@0: d1_8888, \ michael@0: d2_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, \ michael@0: scratch4, scratch5, scratch6 michael@0: preceu.ph.qbl \scratch1, \m1_8888 /* 0 | A | 0 | R */ michael@0: preceu.ph.qbr \scratch2, \m1_8888 /* 0 | G | 0 | B */ michael@0: preceu.ph.qbl \scratch3, \m2_8888 /* 0 | A | 0 | R */ michael@0: preceu.ph.qbr \scratch4, \m2_8888 /* 0 | G | 0 | B */ michael@0: muleu_s.ph.qbl \scratch5, \s1_8888, \scratch1 /* A*A | R*R */ michael@0: muleu_s.ph.qbr \scratch6, \s1_8888, \scratch2 /* G*G | B*B */ michael@0: muleu_s.ph.qbl \scratch1, \s2_8888, \scratch3 /* A*A | R*R */ michael@0: muleu_s.ph.qbr \scratch2, \s2_8888, \scratch4 /* G*G | B*B */ michael@0: shra_r.ph \scratch3, \scratch5, 8 michael@0: shra_r.ph \scratch4, \scratch6, 8 michael@0: shra_r.ph \d1_8888, \scratch1, 8 michael@0: shra_r.ph \d2_8888, \scratch2, 8 michael@0: and \scratch3, \scratch3, \maskLSR /* 0 |A*A| 0 |R*R */ michael@0: and \scratch4, \scratch4, \maskLSR /* 0 |G*G| 0 |B*B */ michael@0: and \d1_8888, \d1_8888, \maskLSR /* 0 |A*A| 0 |R*R */ michael@0: and \d2_8888, \d2_8888, \maskLSR /* 0 |G*G| 0 |B*B */ michael@0: addq.ph \scratch3, \scratch3, \scratch5 michael@0: addq.ph \scratch4, \scratch4, \scratch6 michael@0: addq.ph \d1_8888, \d1_8888, \scratch1 michael@0: addq.ph \d2_8888, \d2_8888, \scratch2 michael@0: shra_r.ph \scratch3, \scratch3, 8 michael@0: shra_r.ph \scratch4, \scratch4, 8 michael@0: shra_r.ph \scratch5, \d1_8888, 8 michael@0: shra_r.ph \scratch6, \d2_8888, 8 michael@0: precr.qb.ph \d1_8888, \scratch3, \scratch4 michael@0: precr.qb.ph \d2_8888, \scratch5, \scratch6 michael@0: .endm michael@0: michael@0: /* michael@0: * OVER operation on single a8r8g8b8 source pixel (s_8888) and single a8r8g8b8 michael@0: * 
destination pixel (d_8888) using a8 mask (m_8). It also requires maskLSR michael@0: * needed for rounding process. maskLSR must have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: .macro OVER_8888_8_8888 s_8888, \ michael@0: m_8, \ michael@0: d_8888, \ michael@0: out_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, scratch4 michael@0: MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \ michael@0: \scratch1, \maskLSR, \ michael@0: \scratch2, \scratch3, \scratch4 michael@0: michael@0: not \scratch2, \scratch1 michael@0: srl \scratch2, \scratch2, 24 michael@0: michael@0: MIPS_UN8x4_MUL_UN8 \d_8888, \scratch2, \ michael@0: \d_8888, \maskLSR, \ michael@0: \scratch3, \scratch4, \out_8888 michael@0: michael@0: addu_s.qb \out_8888, \d_8888, \scratch1 michael@0: .endm michael@0: michael@0: /* michael@0: * OVER operation on two a8r8g8b8 source pixels (s1_8888 and s2_8888) and two michael@0: * a8r8g8b8 destination pixels (d1_8888 and d2_8888) using a8 masks (m1_8 and michael@0: * m2_8). It also requires maskLSR needed for rounding process. 
maskLSR must michael@0: * have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: .macro OVER_2x8888_2x8_2x8888 s1_8888, \ michael@0: s2_8888, \ michael@0: m1_8, \ michael@0: m2_8, \ michael@0: d1_8888, \ michael@0: d2_8888, \ michael@0: out1_8888, \ michael@0: out2_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, \ michael@0: scratch4, scratch5, scratch6 michael@0: MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \ michael@0: \m1_8, \m2_8, \ michael@0: \scratch1, \scratch2, \ michael@0: \maskLSR, \ michael@0: \scratch3, \scratch4, \out1_8888, \ michael@0: \out2_8888, \scratch5, \scratch6 michael@0: michael@0: not \scratch3, \scratch1 michael@0: srl \scratch3, \scratch3, 24 michael@0: not \scratch4, \scratch2 michael@0: srl \scratch4, \scratch4, 24 michael@0: michael@0: MIPS_2xUN8x4_MUL_2xUN8 \d1_8888, \d2_8888, \ michael@0: \scratch3, \scratch4, \ michael@0: \d1_8888, \d2_8888, \ michael@0: \maskLSR, \ michael@0: \scratch5, \scratch6, \out1_8888, \ michael@0: \out2_8888, \scratch3, \scratch4 michael@0: michael@0: addu_s.qb \out1_8888, \d1_8888, \scratch1 michael@0: addu_s.qb \out2_8888, \d2_8888, \scratch2 michael@0: .endm michael@0: michael@0: /* michael@0: * OVER operation on single a8r8g8b8 source pixel (s_8888) and single a8r8g8b8 michael@0: * destination pixel (d_8888). It also requires maskLSR needed for rounding michael@0: * process. 
maskLSR must have following value: michael@0: * li maskLSR, 0x00ff00ff michael@0: */ michael@0: .macro OVER_8888_8888 s_8888, \ michael@0: d_8888, \ michael@0: out_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, scratch4 michael@0: not \scratch1, \s_8888 michael@0: srl \scratch1, \scratch1, 24 michael@0: michael@0: MIPS_UN8x4_MUL_UN8 \d_8888, \scratch1, \ michael@0: \out_8888, \maskLSR, \ michael@0: \scratch2, \scratch3, \scratch4 michael@0: michael@0: addu_s.qb \out_8888, \out_8888, \s_8888 michael@0: .endm michael@0: michael@0: .macro MIPS_UN8x4_MUL_UN8_ADD_UN8x4 s_8888, \ michael@0: m_8, \ michael@0: d_8888, \ michael@0: out_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3 michael@0: MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \ michael@0: \out_8888, \maskLSR, \ michael@0: \scratch1, \scratch2, \scratch3 michael@0: michael@0: addu_s.qb \out_8888, \out_8888, \d_8888 michael@0: .endm michael@0: michael@0: .macro MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 s1_8888, \ michael@0: s2_8888, \ michael@0: m1_8, \ michael@0: m2_8, \ michael@0: d1_8888, \ michael@0: d2_8888, \ michael@0: out1_8888, \ michael@0: out2_8888, \ michael@0: maskLSR, \ michael@0: scratch1, scratch2, scratch3, \ michael@0: scratch4, scratch5, scratch6 michael@0: MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \ michael@0: \m1_8, \m2_8, \ michael@0: \out1_8888, \out2_8888, \ michael@0: \maskLSR, \ michael@0: \scratch1, \scratch2, \scratch3, \ michael@0: \scratch4, \scratch5, \scratch6 michael@0: michael@0: addu_s.qb \out1_8888, \out1_8888, \d1_8888 michael@0: addu_s.qb \out2_8888, \out2_8888, \d2_8888 michael@0: .endm michael@0: michael@0: .macro BILINEAR_INTERPOLATE_SINGLE_PIXEL tl, tr, bl, br, \ michael@0: scratch1, scratch2, \ michael@0: alpha, red, green, blue \ michael@0: wt1, wt2, wb1, wb2 michael@0: andi \scratch1, \tl, 0xff michael@0: andi \scratch2, \tr, 0xff michael@0: andi \alpha, \bl, 0xff michael@0: andi \red, \br, 0xff michael@0: michael@0: multu $ac0, \wt1, 
\scratch1 michael@0: maddu $ac0, \wt2, \scratch2 michael@0: maddu $ac0, \wb1, \alpha michael@0: maddu $ac0, \wb2, \red michael@0: michael@0: ext \scratch1, \tl, 8, 8 michael@0: ext \scratch2, \tr, 8, 8 michael@0: ext \alpha, \bl, 8, 8 michael@0: ext \red, \br, 8, 8 michael@0: michael@0: multu $ac1, \wt1, \scratch1 michael@0: maddu $ac1, \wt2, \scratch2 michael@0: maddu $ac1, \wb1, \alpha michael@0: maddu $ac1, \wb2, \red michael@0: michael@0: ext \scratch1, \tl, 16, 8 michael@0: ext \scratch2, \tr, 16, 8 michael@0: ext \alpha, \bl, 16, 8 michael@0: ext \red, \br, 16, 8 michael@0: michael@0: mflo \blue, $ac0 michael@0: michael@0: multu $ac2, \wt1, \scratch1 michael@0: maddu $ac2, \wt2, \scratch2 michael@0: maddu $ac2, \wb1, \alpha michael@0: maddu $ac2, \wb2, \red michael@0: michael@0: ext \scratch1, \tl, 24, 8 michael@0: ext \scratch2, \tr, 24, 8 michael@0: ext \alpha, \bl, 24, 8 michael@0: ext \red, \br, 24, 8 michael@0: michael@0: mflo \green, $ac1 michael@0: michael@0: multu $ac3, \wt1, \scratch1 michael@0: maddu $ac3, \wt2, \scratch2 michael@0: maddu $ac3, \wb1, \alpha michael@0: maddu $ac3, \wb2, \red michael@0: michael@0: mflo \red, $ac2 michael@0: mflo \alpha, $ac3 michael@0: michael@0: precr.qb.ph \alpha, \alpha, \red michael@0: precr.qb.ph \scratch1, \green, \blue michael@0: precrq.qb.ph \tl, \alpha, \scratch1 michael@0: .endm michael@0: michael@0: #endif //PIXMAN_MIPS_DSPR2_ASM_H