Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/*
 * Copyright © 2007 Luca Barbato
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Luca Barbato not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  Luca Barbato makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
 * SOFTWARE.
 *
 * Author: Luca Barbato (lu_zero@gentoo.org)
 *
 * Based on fbmmx.c by Owen Taylor, Søren Sandmann and Nicholas Miell
 */

#include <config.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
#include <altivec.h>

#define AVV(x...) {x}

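/* splat_alpha replicates the alpha component of each of the four ARGB
 * pixels across all four bytes of that pixel.  The permute indices
 * 0x00/0x04/0x08/0x0C select byte 0 of each 32-bit word, which holds
 * alpha in the big-endian lane order of the PowerPC targets this VMX
 * code was written for; a little-endian build would need different
 * indices.
 */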
static force_inline vector unsigned int
splat_alpha (vector unsigned int pix)
{
    return vec_perm (pix, pix,
                     (vector unsigned char)AVV (
                         0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04,
                         0x08, 0x08, 0x08, 0x08, 0x0C, 0x0C, 0x0C, 0x0C));
}
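
/* pix_multiply multiplies each 8-bit channel of p by the matching
 * channel of a and divides by 255 with round-to-nearest, using the
 * exact identity
 *
 *     t = p * a + 0x80;
 *     result = (t + (t >> 8)) >> 8;    == round (p * a / 255.0)
 *
 * The channels are widened to 16 bits by merging with a zero vector,
 * multiplied and biased in a single vec_mladd, and packed back down to
 * 8 bits with saturation (vec_packsu).
 */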

static force_inline vector unsigned int
pix_multiply (vector unsigned int p, vector unsigned int a)
{
    vector unsigned short hi, lo, mod;

    /* unpack to short */
    hi = (vector unsigned short)
        vec_mergeh ((vector unsigned char)AVV (0),
                    (vector unsigned char)p);

    mod = (vector unsigned short)
        vec_mergeh ((vector unsigned char)AVV (0),
                    (vector unsigned char)a);

    hi = vec_mladd (hi, mod, (vector unsigned short)
                    AVV (0x0080, 0x0080, 0x0080, 0x0080,
                         0x0080, 0x0080, 0x0080, 0x0080));

    hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8)));

    hi = vec_sr (hi, vec_splat_u16 (8));

    /* unpack to short */
    lo = (vector unsigned short)
        vec_mergel ((vector unsigned char)AVV (0),
                    (vector unsigned char)p);
    mod = (vector unsigned short)
        vec_mergel ((vector unsigned char)AVV (0),
                    (vector unsigned char)a);

    lo = vec_mladd (lo, mod, (vector unsigned short)
                    AVV (0x0080, 0x0080, 0x0080, 0x0080,
                         0x0080, 0x0080, 0x0080, 0x0080));

    lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8)));

    lo = vec_sr (lo, vec_splat_u16 (8));

    return (vector unsigned int)vec_packsu (hi, lo);
}
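
/* Helpers on 8-bit channels: pix_add is a saturating add, pix_add_mul
 * computes x*a + y*b (each product rounded as in pix_multiply), and
 * negate is a bitwise complement, which per channel equals 255 - x,
 * i.e. (1 - x) in 8-bit fixed point.
 */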

static force_inline vector unsigned int
pix_add (vector unsigned int a, vector unsigned int b)
{
    return (vector unsigned int)vec_adds ((vector unsigned char)a,
                                          (vector unsigned char)b);
}

static force_inline vector unsigned int
pix_add_mul (vector unsigned int x,
             vector unsigned int a,
             vector unsigned int y,
             vector unsigned int b)
{
    vector unsigned int t1, t2;

    t1 = pix_multiply (x, a);
    t2 = pix_multiply (y, b);

    return pix_add (t1, t2);
}

static force_inline vector unsigned int
negate (vector unsigned int src)
{
    return vec_nor (src, src);
}
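
/* over implements the Porter-Duff OVER operator,
 *
 *     result = src + dest * (255 - srca) / 255
 *
 * with a saturating add; srca is expected to be splat_alpha (src).
 */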

/* dest*~srca + src */
static force_inline vector unsigned int
over (vector unsigned int src,
      vector unsigned int srca,
      vector unsigned int dest)
{
    vector unsigned char tmp = (vector unsigned char)
        pix_multiply (dest, negate (srca));

    tmp = vec_adds ((vector unsigned char)src, tmp);
    return (vector unsigned int)tmp;
}
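
/* in_over applies the mask to the source (the IN operator) and then
 * composites with OVER: both the source and its splatted alpha are
 * multiplied by the mask before the usual OVER formula runs.
 */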

/* in == pix_multiply */
#define in_over(src, srca, mask, dest) \
    over (pix_multiply (src, mask), \
          pix_multiply (srca, mask), dest)

#define COMPUTE_SHIFT_MASK(source) \
    source ## _mask = vec_lvsl (0, source);

#define COMPUTE_SHIFT_MASKS(dest, source) \
    dest ## _mask = vec_lvsl (0, dest); \
    source ## _mask = vec_lvsl (0, source); \
    store_mask = vec_lvsr (0, dest);

#define COMPUTE_SHIFT_MASKC(dest, source, mask) \
    mask ## _mask = vec_lvsl (0, mask); \
    dest ## _mask = vec_lvsl (0, dest); \
    source ## _mask = vec_lvsl (0, source); \
    store_mask = vec_lvsr (0, dest);

/* Note that the macros below require the caller to declare the
 * temporary vectors (tmp1-tmp4, edges, the *_mask variables and
 * store_mask).  tmp3 and tmp4 must remain untouched between
 * LOAD_VECTORS* and STORE_VECTOR: they hold the original edge bytes
 * that STORE_VECTOR needs to write back.
 */
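
/* AltiVec loads and stores ignore the low four address bits, so
 * unaligned buffers are handled with the classic lvsl/lvsr permute
 * idiom: COMPUTE_SHIFT_MASK* derives permute vectors from each
 * pointer's alignment, LOAD_VECTORS* reads the two 16-byte blocks
 * covering a span (offsets 0 and 15) and vec_perm's them into place,
 * and STORE_VECTOR merges the combined pixels back with the preserved
 * edge bytes before storing both blocks.
 */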

#define LOAD_VECTORS(dest, source) \
    tmp1 = (typeof(tmp1))vec_ld (0, source); \
    tmp2 = (typeof(tmp2))vec_ld (15, source); \
    tmp3 = (typeof(tmp3))vec_ld (0, dest); \
    v ## source = (typeof(v ## source)) \
        vec_perm (tmp1, tmp2, source ## _mask); \
    tmp4 = (typeof(tmp4))vec_ld (15, dest); \
    v ## dest = (typeof(v ## dest)) \
        vec_perm (tmp3, tmp4, dest ## _mask);

#define LOAD_VECTORSC(dest, source, mask) \
    tmp1 = (typeof(tmp1))vec_ld (0, source); \
    tmp2 = (typeof(tmp2))vec_ld (15, source); \
    tmp3 = (typeof(tmp3))vec_ld (0, dest); \
    v ## source = (typeof(v ## source)) \
        vec_perm (tmp1, tmp2, source ## _mask); \
    tmp4 = (typeof(tmp4))vec_ld (15, dest); \
    tmp1 = (typeof(tmp1))vec_ld (0, mask); \
    v ## dest = (typeof(v ## dest)) \
        vec_perm (tmp3, tmp4, dest ## _mask); \
    tmp2 = (typeof(tmp2))vec_ld (15, mask); \
    v ## mask = (typeof(v ## mask)) \
        vec_perm (tmp1, tmp2, mask ## _mask);

#define LOAD_VECTORSM(dest, source, mask) \
    LOAD_VECTORSC (dest, source, mask) \
    v ## source = pix_multiply (v ## source, \
                                splat_alpha (v ## mask));

#define STORE_VECTOR(dest) \
    edges = vec_perm (tmp4, tmp3, dest ## _mask); \
    tmp3 = vec_perm ((vector unsigned char)v ## dest, edges, store_mask); \
    tmp1 = vec_perm (edges, (vector unsigned char)v ## dest, store_mask); \
    vec_st ((vector unsigned int) tmp3, 15, dest); \
    vec_st ((vector unsigned int) tmp1, 0, dest);

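/* Each combiner below comes in an unmasked and a masked variant, plus
 * a thin *_u entry point that dispatches on whether a mask was
 * supplied (the imp and op arguments are part of the generic pixman
 * combiner signature and are unused here).  The main loop processes
 * four pixels per iteration; a scalar tail, built on the UN8x4_*
 * macros from pixman-combine32.h, finishes the remaining width % 4
 * pixels.
 */
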
static void
vmx_combine_over_u_no_mask (uint32_t *      dest,
                            const uint32_t *src,
                            int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = over (vsrc, splat_alpha (vsrc), vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t ia = ALPHA_8 (~s);

        UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);

        dest[i] = d;
    }
}

static void
vmx_combine_over_u_mask (uint32_t *      dest,
                         const uint32_t *src,
                         const uint32_t *mask,
                         int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = over (vsrc, splat_alpha (vsrc), vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t ia;

        UN8x4_MUL_UN8 (s, m);

        ia = ALPHA_8 (~s);

        UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
        dest[i] = d;
    }
}

static void
vmx_combine_over_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    if (mask)
        vmx_combine_over_u_mask (dest, src, mask, width);
    else
        vmx_combine_over_u_no_mask (dest, src, width);
}

static void
vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
                                    const uint32_t *src,
                                    int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = over (vdest, splat_alpha (vdest), vsrc);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t ia = ALPHA_8 (~dest[i]);

        UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
        dest[i] = s;
    }
}

static void
vmx_combine_over_reverse_u_mask (uint32_t *      dest,
                                 const uint32_t *src,
                                 const uint32_t *mask,
                                 int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = over (vdest, splat_alpha (vdest), vsrc);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t ia = ALPHA_8 (~dest[i]);

        UN8x4_MUL_UN8 (s, m);

        UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
        dest[i] = s;
    }
}

static void
vmx_combine_over_reverse_u (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            uint32_t *               dest,
                            const uint32_t *         src,
                            const uint32_t *         mask,
                            int                      width)
{
    if (mask)
        vmx_combine_over_reverse_u_mask (dest, src, mask, width);
    else
        vmx_combine_over_reverse_u_no_mask (dest, src, width);
}

static void
vmx_combine_in_u_no_mask (uint32_t *      dest,
                          const uint32_t *src,
                          int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_multiply (vsrc, splat_alpha (vdest));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t a = ALPHA_8 (dest[i]);

        UN8x4_MUL_UN8 (s, a);
        dest[i] = s;
    }
}

static void
vmx_combine_in_u_mask (uint32_t *      dest,
                       const uint32_t *src,
                       const uint32_t *mask,
                       int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_multiply (vsrc, splat_alpha (vdest));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t a = ALPHA_8 (dest[i]);

        UN8x4_MUL_UN8 (s, m);
        UN8x4_MUL_UN8 (s, a);

        dest[i] = s;
    }
}

static void
vmx_combine_in_u (pixman_implementation_t *imp,
                  pixman_op_t              op,
                  uint32_t *               dest,
                  const uint32_t *         src,
                  const uint32_t *         mask,
                  int                      width)
{
    if (mask)
        vmx_combine_in_u_mask (dest, src, mask, width);
    else
        vmx_combine_in_u_no_mask (dest, src, width);
}

static void
vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
                                  const uint32_t *src,
                                  int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_multiply (vdest, splat_alpha (vsrc));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t d = dest[i];
        uint32_t a = ALPHA_8 (src[i]);

        UN8x4_MUL_UN8 (d, a);

        dest[i] = d;
    }
}

static void
vmx_combine_in_reverse_u_mask (uint32_t *      dest,
                               const uint32_t *src,
                               const uint32_t *mask,
                               int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_multiply (vdest, splat_alpha (vsrc));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t d = dest[i];
        uint32_t a = src[i];

        UN8x4_MUL_UN8 (a, m);
        a = ALPHA_8 (a);
        UN8x4_MUL_UN8 (d, a);

        dest[i] = d;
    }
}

static void
vmx_combine_in_reverse_u (pixman_implementation_t *imp,
                          pixman_op_t              op,
                          uint32_t *               dest,
                          const uint32_t *         src,
                          const uint32_t *         mask,
                          int                      width)
{
    if (mask)
        vmx_combine_in_reverse_u_mask (dest, src, mask, width);
    else
        vmx_combine_in_reverse_u_no_mask (dest, src, width);
}

static void
vmx_combine_out_u_no_mask (uint32_t *      dest,
                           const uint32_t *src,
                           int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_multiply (vsrc, splat_alpha (negate (vdest)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t a = ALPHA_8 (~dest[i]);

        UN8x4_MUL_UN8 (s, a);

        dest[i] = s;
    }
}

static void
vmx_combine_out_u_mask (uint32_t *      dest,
                        const uint32_t *src,
                        const uint32_t *mask,
                        int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_multiply (vsrc, splat_alpha (negate (vdest)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t a = ALPHA_8 (~dest[i]);

        UN8x4_MUL_UN8 (s, m);
        UN8x4_MUL_UN8 (s, a);

        dest[i] = s;
    }
}

static void
vmx_combine_out_u (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    if (mask)
        vmx_combine_out_u_mask (dest, src, mask, width);
    else
        vmx_combine_out_u_no_mask (dest, src, width);
}

static void
vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
                                   const uint32_t *src,
                                   int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_multiply (vdest, splat_alpha (negate (vsrc)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t d = dest[i];
        uint32_t a = ALPHA_8 (~src[i]);

        UN8x4_MUL_UN8 (d, a);

        dest[i] = d;
    }
}

static void
vmx_combine_out_reverse_u_mask (uint32_t *      dest,
                                const uint32_t *src,
                                const uint32_t *mask,
                                int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_multiply (vdest, splat_alpha (negate (vsrc)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t d = dest[i];
        uint32_t a = src[i];

        UN8x4_MUL_UN8 (a, m);
        a = ALPHA_8 (~a);
        UN8x4_MUL_UN8 (d, a);

        dest[i] = d;
    }
}

static void
vmx_combine_out_reverse_u (pixman_implementation_t *imp,
                           pixman_op_t              op,
                           uint32_t *               dest,
                           const uint32_t *         src,
                           const uint32_t *         mask,
                           int                      width)
{
    if (mask)
        vmx_combine_out_reverse_u_mask (dest, src, mask, width);
    else
        vmx_combine_out_reverse_u_no_mask (dest, src, width);
}

static void
vmx_combine_atop_u_no_mask (uint32_t *      dest,
                            const uint32_t *src,
                            int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_add_mul (vsrc, splat_alpha (vdest),
                             vdest, splat_alpha (negate (vsrc)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t dest_a = ALPHA_8 (d);
        uint32_t src_ia = ALPHA_8 (~s);

        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia);

        dest[i] = s;
    }
}

static void
vmx_combine_atop_u_mask (uint32_t *      dest,
                         const uint32_t *src,
                         const uint32_t *mask,
                         int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_add_mul (vsrc, splat_alpha (vdest),
                             vdest, splat_alpha (negate (vsrc)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t dest_a = ALPHA_8 (d);
        uint32_t src_ia;

        UN8x4_MUL_UN8 (s, m);

        src_ia = ALPHA_8 (~s);

        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia);

        dest[i] = s;
    }
}

static void
vmx_combine_atop_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    if (mask)
        vmx_combine_atop_u_mask (dest, src, mask, width);
    else
        vmx_combine_atop_u_no_mask (dest, src, width);
}

static void
vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
                                    const uint32_t *src,
                                    int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_add_mul (vdest, splat_alpha (vsrc),
                             vsrc, splat_alpha (negate (vdest)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t src_a = ALPHA_8 (s);
        uint32_t dest_ia = ALPHA_8 (~d);

        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);

        dest[i] = s;
    }
}

static void
vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
                                 const uint32_t *src,
                                 const uint32_t *mask,
                                 int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_add_mul (vdest, splat_alpha (vsrc),
                             vsrc, splat_alpha (negate (vdest)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t src_a;
        uint32_t dest_ia = ALPHA_8 (~d);

        UN8x4_MUL_UN8 (s, m);

        src_a = ALPHA_8 (s);

        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);

        dest[i] = s;
    }
}

static void
vmx_combine_atop_reverse_u (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            uint32_t *               dest,
                            const uint32_t *         src,
                            const uint32_t *         mask,
                            int                      width)
{
    if (mask)
        vmx_combine_atop_reverse_u_mask (dest, src, mask, width);
    else
        vmx_combine_atop_reverse_u_no_mask (dest, src, width);
}

static void
vmx_combine_xor_u_no_mask (uint32_t *      dest,
                           const uint32_t *src,
                           int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)),
                             vdest, splat_alpha (negate (vsrc)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t src_ia = ALPHA_8 (~s);
        uint32_t dest_ia = ALPHA_8 (~d);

        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);

        dest[i] = s;
    }
}

static void
vmx_combine_xor_u_mask (uint32_t *      dest,
                        const uint32_t *src,
                        const uint32_t *mask,
                        int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)),
                             vdest, splat_alpha (negate (vsrc)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t src_ia;
        uint32_t dest_ia = ALPHA_8 (~d);

        UN8x4_MUL_UN8 (s, m);

        src_ia = ALPHA_8 (~s);

        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);

        dest[i] = s;
    }
}

static void
vmx_combine_xor_u (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    if (mask)
        vmx_combine_xor_u_mask (dest, src, mask, width);
    else
        vmx_combine_xor_u_no_mask (dest, src, width);
}

static void
vmx_combine_add_u_no_mask (uint32_t *      dest,
                           const uint32_t *src,
                           int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORS (dest, src);

        vdest = pix_add (vsrc, vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t s = src[i];
        uint32_t d = dest[i];

        UN8x4_ADD_UN8x4 (d, s);

        dest[i] = d;
    }
}

static void
vmx_combine_add_u_mask (uint32_t *      dest,
                        const uint32_t *src,
                        const uint32_t *mask,
                        int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, src_mask, mask_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSM (dest, src, mask);

        vdest = pix_add (vsrc, vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t m = ALPHA_8 (mask[i]);
        uint32_t s = src[i];
        uint32_t d = dest[i];

        UN8x4_MUL_UN8 (s, m);
        UN8x4_ADD_UN8x4 (d, s);

        dest[i] = d;
    }
}

static void
vmx_combine_add_u (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    if (mask)
        vmx_combine_add_u_mask (dest, src, mask, width);
    else
        vmx_combine_add_u_no_mask (dest, src, width);
}

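/* The *_ca combiners implement component alpha: the mask carries a
 * separate 8-bit value per color channel rather than a single alpha,
 * so it is multiplied in channel-wise (pix_multiply / UN8x4_MUL_UN8x4)
 * instead of being splatted from the alpha byte as in the *_u paths
 * above.
 */
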
michael@0 | 1121 | static void |
michael@0 | 1122 | vmx_combine_src_ca (pixman_implementation_t *imp, |
michael@0 | 1123 | pixman_op_t op, |
michael@0 | 1124 | uint32_t * dest, |
michael@0 | 1125 | const uint32_t * src, |
michael@0 | 1126 | const uint32_t * mask, |
michael@0 | 1127 | int width) |
michael@0 | 1128 | { |
michael@0 | 1129 | int i; |
michael@0 | 1130 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1131 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1132 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1133 | |
michael@0 | 1134 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1135 | |
michael@0 | 1136 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1137 | for (i = width / 4; i > 0; i--) |
michael@0 | 1138 | { |
michael@0 | 1139 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1140 | |
michael@0 | 1141 | vdest = pix_multiply (vsrc, vmask); |
michael@0 | 1142 | |
michael@0 | 1143 | STORE_VECTOR (dest); |
michael@0 | 1144 | |
michael@0 | 1145 | mask += 4; |
michael@0 | 1146 | src += 4; |
michael@0 | 1147 | dest += 4; |
michael@0 | 1148 | } |
michael@0 | 1149 | |
michael@0 | 1150 | for (i = width % 4; --i >= 0;) |
michael@0 | 1151 | { |
michael@0 | 1152 | uint32_t a = mask[i]; |
michael@0 | 1153 | uint32_t s = src[i]; |
michael@0 | 1154 | |
michael@0 | 1155 | UN8x4_MUL_UN8x4 (s, a); |
michael@0 | 1156 | |
michael@0 | 1157 | dest[i] = s; |
michael@0 | 1158 | } |
michael@0 | 1159 | } |
michael@0 | 1160 | |
michael@0 | 1161 | static void |
michael@0 | 1162 | vmx_combine_over_ca (pixman_implementation_t *imp, |
michael@0 | 1163 | pixman_op_t op, |
michael@0 | 1164 | uint32_t * dest, |
michael@0 | 1165 | const uint32_t * src, |
michael@0 | 1166 | const uint32_t * mask, |
michael@0 | 1167 | int width) |
michael@0 | 1168 | { |
michael@0 | 1169 | int i; |
michael@0 | 1170 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1171 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1172 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1173 | |
michael@0 | 1174 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1175 | |
michael@0 | 1176 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1177 | for (i = width / 4; i > 0; i--) |
michael@0 | 1178 | { |
michael@0 | 1179 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1180 | |
michael@0 | 1181 | vdest = in_over (vsrc, splat_alpha (vsrc), vmask, vdest); |
michael@0 | 1182 | |
michael@0 | 1183 | STORE_VECTOR (dest); |
michael@0 | 1184 | |
michael@0 | 1185 | mask += 4; |
michael@0 | 1186 | src += 4; |
michael@0 | 1187 | dest += 4; |
michael@0 | 1188 | } |
michael@0 | 1189 | |
michael@0 | 1190 | for (i = width % 4; --i >= 0;) |
michael@0 | 1191 | { |
michael@0 | 1192 | uint32_t a = mask[i]; |
michael@0 | 1193 | uint32_t s = src[i]; |
michael@0 | 1194 | uint32_t d = dest[i]; |
michael@0 | 1195 | uint32_t sa = ALPHA_8 (s); |
michael@0 | 1196 | |
michael@0 | 1197 | UN8x4_MUL_UN8x4 (s, a); |
michael@0 | 1198 | UN8x4_MUL_UN8 (a, sa); |
michael@0 | 1199 | UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ~a, s); |
michael@0 | 1200 | |
michael@0 | 1201 | dest[i] = d; |
michael@0 | 1202 | } |
michael@0 | 1203 | } |
michael@0 | 1204 | |
michael@0 | 1205 | static void |
michael@0 | 1206 | vmx_combine_over_reverse_ca (pixman_implementation_t *imp, |
michael@0 | 1207 | pixman_op_t op, |
michael@0 | 1208 | uint32_t * dest, |
michael@0 | 1209 | const uint32_t * src, |
michael@0 | 1210 | const uint32_t * mask, |
michael@0 | 1211 | int width) |
michael@0 | 1212 | { |
michael@0 | 1213 | int i; |
michael@0 | 1214 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1215 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1216 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1217 | |
michael@0 | 1218 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1219 | |
michael@0 | 1220 | /* printf("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1221 | for (i = width / 4; i > 0; i--) |
michael@0 | 1222 | { |
michael@0 | 1223 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1224 | |
michael@0 | 1225 | vdest = over (vdest, splat_alpha (vdest), pix_multiply (vsrc, vmask)); |
michael@0 | 1226 | |
michael@0 | 1227 | STORE_VECTOR (dest); |
michael@0 | 1228 | |
michael@0 | 1229 | mask += 4; |
michael@0 | 1230 | src += 4; |
michael@0 | 1231 | dest += 4; |
michael@0 | 1232 | } |
michael@0 | 1233 | |
michael@0 | 1234 | for (i = width % 4; --i >= 0;) |
michael@0 | 1235 | { |
michael@0 | 1236 | uint32_t a = mask[i]; |
michael@0 | 1237 | uint32_t s = src[i]; |
michael@0 | 1238 | uint32_t d = dest[i]; |
michael@0 | 1239 | uint32_t ida = ALPHA_8 (~d); |
michael@0 | 1240 | |
michael@0 | 1241 | UN8x4_MUL_UN8x4 (s, a); |
michael@0 | 1242 | UN8x4_MUL_UN8_ADD_UN8x4 (s, ida, d); |
michael@0 | 1243 | |
michael@0 | 1244 | dest[i] = s; |
michael@0 | 1245 | } |
michael@0 | 1246 | } |
michael@0 | 1247 | |
michael@0 | 1248 | static void |
michael@0 | 1249 | vmx_combine_in_ca (pixman_implementation_t *imp, |
michael@0 | 1250 | pixman_op_t op, |
michael@0 | 1251 | uint32_t * dest, |
michael@0 | 1252 | const uint32_t * src, |
michael@0 | 1253 | const uint32_t * mask, |
michael@0 | 1254 | int width) |
michael@0 | 1255 | { |
michael@0 | 1256 | int i; |
michael@0 | 1257 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1258 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1259 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1260 | |
michael@0 | 1261 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1262 | |
michael@0 | 1263 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1264 | for (i = width / 4; i > 0; i--) |
michael@0 | 1265 | { |
michael@0 | 1266 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1267 | |
michael@0 | 1268 | vdest = pix_multiply (pix_multiply (vsrc, vmask), splat_alpha (vdest)); |
michael@0 | 1269 | |
michael@0 | 1270 | STORE_VECTOR (dest); |
michael@0 | 1271 | |
michael@0 | 1272 | src += 4; |
michael@0 | 1273 | dest += 4; |
michael@0 | 1274 | mask += 4; |
michael@0 | 1275 | } |
michael@0 | 1276 | |
michael@0 | 1277 | for (i = width % 4; --i >= 0;) |
michael@0 | 1278 | { |
michael@0 | 1279 | uint32_t a = mask[i]; |
michael@0 | 1280 | uint32_t s = src[i]; |
michael@0 | 1281 | uint32_t da = ALPHA_8 (dest[i]); |
michael@0 | 1282 | |
michael@0 | 1283 | UN8x4_MUL_UN8x4 (s, a); |
michael@0 | 1284 | UN8x4_MUL_UN8 (s, da); |
michael@0 | 1285 | |
michael@0 | 1286 | dest[i] = s; |
michael@0 | 1287 | } |
michael@0 | 1288 | } |
michael@0 | 1289 | |
michael@0 | 1290 | static void |
michael@0 | 1291 | vmx_combine_in_reverse_ca (pixman_implementation_t *imp, |
michael@0 | 1292 | pixman_op_t op, |
michael@0 | 1293 | uint32_t * dest, |
michael@0 | 1294 | const uint32_t * src, |
michael@0 | 1295 | const uint32_t * mask, |
michael@0 | 1296 | int width) |
michael@0 | 1297 | { |
michael@0 | 1298 | int i; |
michael@0 | 1299 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1300 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1301 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1302 | |
michael@0 | 1303 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1304 | |
michael@0 | 1305 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1306 | for (i = width / 4; i > 0; i--) |
michael@0 | 1307 | { |
michael@0 | 1308 | |
michael@0 | 1309 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1310 | |
michael@0 | 1311 | vdest = pix_multiply (vdest, pix_multiply (vmask, splat_alpha (vsrc))); |
michael@0 | 1312 | |
michael@0 | 1313 | STORE_VECTOR (dest); |
michael@0 | 1314 | |
michael@0 | 1315 | src += 4; |
michael@0 | 1316 | dest += 4; |
michael@0 | 1317 | mask += 4; |
michael@0 | 1318 | } |
michael@0 | 1319 | |
michael@0 | 1320 | for (i = width % 4; --i >= 0;) |
michael@0 | 1321 | { |
michael@0 | 1322 | uint32_t a = mask[i]; |
michael@0 | 1323 | uint32_t d = dest[i]; |
michael@0 | 1324 | uint32_t sa = ALPHA_8 (src[i]); |
michael@0 | 1325 | |
michael@0 | 1326 | UN8x4_MUL_UN8 (a, sa); |
michael@0 | 1327 | UN8x4_MUL_UN8x4 (d, a); |
michael@0 | 1328 | |
michael@0 | 1329 | dest[i] = d; |
michael@0 | 1330 | } |
michael@0 | 1331 | } |
michael@0 | 1332 | |
michael@0 | 1333 | static void |
michael@0 | 1334 | vmx_combine_out_ca (pixman_implementation_t *imp, |
michael@0 | 1335 | pixman_op_t op, |
michael@0 | 1336 | uint32_t * dest, |
michael@0 | 1337 | const uint32_t * src, |
michael@0 | 1338 | const uint32_t * mask, |
michael@0 | 1339 | int width) |
michael@0 | 1340 | { |
michael@0 | 1341 | int i; |
michael@0 | 1342 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1343 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1344 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1345 | |
michael@0 | 1346 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1347 | |
michael@0 | 1348 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1349 | for (i = width / 4; i > 0; i--) |
michael@0 | 1350 | { |
michael@0 | 1351 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1352 | |
michael@0 | 1353 | vdest = pix_multiply ( |
michael@0 | 1354 | pix_multiply (vsrc, vmask), splat_alpha (negate (vdest))); |
michael@0 | 1355 | |
michael@0 | 1356 | STORE_VECTOR (dest); |
michael@0 | 1357 | |
michael@0 | 1358 | src += 4; |
michael@0 | 1359 | dest += 4; |
michael@0 | 1360 | mask += 4; |
michael@0 | 1361 | } |
michael@0 | 1362 | |
michael@0 | 1363 | for (i = width % 4; --i >= 0;) |
michael@0 | 1364 | { |
michael@0 | 1365 | uint32_t a = mask[i]; |
michael@0 | 1366 | uint32_t s = src[i]; |
michael@0 | 1367 | uint32_t d = dest[i]; |
michael@0 | 1368 | uint32_t da = ALPHA_8 (~d); |
michael@0 | 1369 | |
michael@0 | 1370 | UN8x4_MUL_UN8x4 (s, a); |
michael@0 | 1371 | UN8x4_MUL_UN8 (s, da); |
michael@0 | 1372 | |
michael@0 | 1373 | dest[i] = s; |
michael@0 | 1374 | } |
michael@0 | 1375 | } |
michael@0 | 1376 | |
michael@0 | 1377 | static void |
michael@0 | 1378 | vmx_combine_out_reverse_ca (pixman_implementation_t *imp, |
michael@0 | 1379 | pixman_op_t op, |
michael@0 | 1380 | uint32_t * dest, |
michael@0 | 1381 | const uint32_t * src, |
michael@0 | 1382 | const uint32_t * mask, |
michael@0 | 1383 | int width) |
michael@0 | 1384 | { |
michael@0 | 1385 | int i; |
michael@0 | 1386 | vector unsigned int vdest, vsrc, vmask; |
michael@0 | 1387 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1388 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1389 | |
michael@0 | 1390 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1391 | |
michael@0 | 1392 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
michael@0 | 1393 | for (i = width / 4; i > 0; i--) |
michael@0 | 1394 | { |
michael@0 | 1395 | LOAD_VECTORSC (dest, src, mask); |
michael@0 | 1396 | |
michael@0 | 1397 | vdest = pix_multiply ( |
michael@0 | 1398 | vdest, negate (pix_multiply (vmask, splat_alpha (vsrc)))); |
michael@0 | 1399 | |
michael@0 | 1400 | STORE_VECTOR (dest); |
michael@0 | 1401 | |
michael@0 | 1402 | src += 4; |
michael@0 | 1403 | dest += 4; |
michael@0 | 1404 | mask += 4; |
michael@0 | 1405 | } |
michael@0 | 1406 | |
michael@0 | 1407 | for (i = width % 4; --i >= 0;) |
michael@0 | 1408 | { |
michael@0 | 1409 | uint32_t a = mask[i]; |
michael@0 | 1410 | uint32_t s = src[i]; |
michael@0 | 1411 | uint32_t d = dest[i]; |
michael@0 | 1412 | uint32_t sa = ALPHA_8 (s); |
michael@0 | 1413 | |
michael@0 | 1414 | UN8x4_MUL_UN8 (a, sa); |
michael@0 | 1415 | UN8x4_MUL_UN8x4 (d, ~a); |
michael@0 | 1416 | |
michael@0 | 1417 | dest[i] = d; |
michael@0 | 1418 | } |
michael@0 | 1419 | } |
michael@0 | 1420 | |
michael@0 | 1421 | static void |
michael@0 | 1422 | vmx_combine_atop_ca (pixman_implementation_t *imp, |
michael@0 | 1423 | pixman_op_t op, |
michael@0 | 1424 | uint32_t * dest, |
michael@0 | 1425 | const uint32_t * src, |
michael@0 | 1426 | const uint32_t * mask, |
michael@0 | 1427 | int width) |
michael@0 | 1428 | { |
michael@0 | 1429 | int i; |
michael@0 | 1430 | vector unsigned int vdest, vsrc, vmask, vsrca; |
michael@0 | 1431 | vector unsigned char tmp1, tmp2, tmp3, tmp4, edges, |
michael@0 | 1432 | dest_mask, mask_mask, src_mask, store_mask; |
michael@0 | 1433 | |
michael@0 | 1434 | COMPUTE_SHIFT_MASKC (dest, src, mask); |
michael@0 | 1435 | |
michael@0 | 1436 | /* printf ("%s\n",__PRETTY_FUNCTION__); */ |
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vsrca = splat_alpha (vsrc);

        vsrc = pix_multiply (vsrc, vmask);
        vmask = pix_multiply (vmask, vsrca);

        vdest = pix_add_mul (vsrc, splat_alpha (vdest),
                             negate (vmask), vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t sa = ALPHA_8 (s);
        uint32_t da = ALPHA_8 (d);

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_MUL_UN8 (a, sa);
        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da);

        dest[i] = d;
    }
}

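/*
 * ATOP reverse, component alpha:
 *
 *     dest = dest * (mask * src.alpha) + src * mask * NOT (dest.alpha)
 */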
static void
vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             uint32_t *               dest,
                             const uint32_t *         src,
                             const uint32_t *         mask,
                             int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vdest = pix_add_mul (vdest,
                             pix_multiply (vmask, splat_alpha (vsrc)),
                             pix_multiply (vsrc, vmask),
                             negate (splat_alpha (vdest)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t sa = ALPHA_8 (s);
        uint32_t da = ALPHA_8 (~d);

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_MUL_UN8 (a, sa);
        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, a, s, da);

        dest[i] = d;
    }
}

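/*
 * XOR, component alpha:
 *
 *     dest = src * mask * NOT (dest.alpha) + dest * NOT (mask * src.alpha)
 */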
static void
vmx_combine_xor_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vdest = pix_add_mul (vdest,
                             negate (pix_multiply (vmask, splat_alpha (vsrc))),
                             pix_multiply (vsrc, vmask),
                             negate (splat_alpha (vdest)));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t sa = ALPHA_8 (s);
        uint32_t da = ALPHA_8 (~d);

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_MUL_UN8 (a, sa);
        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da);

        dest[i] = d;
    }
}

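/*
 * ADD, component alpha:
 *
 *     dest = saturate (src * mask + dest)
 */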
static void
vmx_combine_add_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vdest = pix_add (pix_multiply (vsrc, vmask), vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_ADD_UN8x4 (s, d);

        dest[i] = s;
    }
}

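/*
 * No VMX-specific full-operation fast paths are provided: the table holds
 * only the PIXMAN_OP_NONE terminator, so whole-composite requests fall
 * through to the fallback implementation, while the combiners above are
 * still used for the per-scanline work.
 */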
static const pixman_fast_path_t vmx_fast_paths[] =
{
    { PIXMAN_OP_NONE },
};

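/*
 * Build the VMX implementation on top of the given fallback.  Only the
 * combiners assigned below use AltiVec; every other operation is
 * delegated to the fallback implementation.
 */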
pixman_implementation_t *
_pixman_implementation_create_vmx (pixman_implementation_t *fallback)
{
    pixman_implementation_t *imp =
        _pixman_implementation_create (fallback, vmx_fast_paths);

    /* Set up function pointers */

    imp->combine_32[PIXMAN_OP_OVER] = vmx_combine_over_u;
    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_u;
    imp->combine_32[PIXMAN_OP_IN] = vmx_combine_in_u;
    imp->combine_32[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_u;
    imp->combine_32[PIXMAN_OP_OUT] = vmx_combine_out_u;
    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_u;
    imp->combine_32[PIXMAN_OP_ATOP] = vmx_combine_atop_u;
    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_u;
    imp->combine_32[PIXMAN_OP_XOR] = vmx_combine_xor_u;

    imp->combine_32[PIXMAN_OP_ADD] = vmx_combine_add_u;

    imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_IN] = vmx_combine_in_ca;
    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT] = vmx_combine_out_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP] = vmx_combine_atop_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_ca;
    imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_ca;

    return imp;
}
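
/*
 * Usage sketch (illustrative only; the real wiring lives in pixman's CPU
 * detection code, not in this file).  The dispatch layer is expected to
 * stack this implementation on top of a more general one after a runtime
 * AltiVec check, roughly:
 *
 *     pixman_implementation_t *imp = _pixman_implementation_create_general ();
 *
 *     if (have_altivec)
 *         imp = _pixman_implementation_create_vmx (imp);
 *
 * where have_altivec stands in for a platform-specific CPU feature test.
 * The resulting implementation answers combine_32 / combine_32_ca requests
 * with the VMX routines above and forwards everything else to the fallback.
 */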