gfx/cairo/libpixman/src/pixman-combine32.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 22 Jan 2015 13:21:57 +0100
branch       TOR_BUG_9701
changeset    15:b8a032363ba2
permissions  -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

#define COMPONENT_SIZE 8
#define MASK 0xff
#define ONE_HALF 0x80

#define A_SHIFT 8 * 3
#define R_SHIFT 8 * 2
#define G_SHIFT 8
#define A_MASK 0xff000000
#define R_MASK 0xff0000
#define G_MASK 0xff00

#define RB_MASK 0xff00ff
#define AG_MASK 0xff00ff00
#define RB_ONE_HALF 0x800080
#define RB_MASK_PLUS_ONE 0x10000100

#define ALPHA_8(x) ((x) >> A_SHIFT)
#define RED_8(x) (((x) >> R_SHIFT) & MASK)
#define GREEN_8(x) (((x) >> G_SHIFT) & MASK)
#define BLUE_8(x) ((x) & MASK)
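
/*
 * Pixels are packed as 0xAARRGGBB; e.g. for x = 0x80ff40c0,
 * ALPHA_8 (x) == 0x80, RED_8 (x) == 0xff, GREEN_8 (x) == 0x40 and
 * BLUE_8 (x) == 0xc0. RB_MASK and AG_MASK split such a pixel into the
 * two 16-bit-lane pairs that the helper macros below operate on.
 */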

/*
 * ARMv6 has the UQADD8 instruction, which implements unsigned saturated
 * addition for 8-bit values packed in 32-bit registers. It is very useful
 * for the UN8x4_ADD_UN8x4, UN8_rb_ADD_UN8_rb and ADD_UN8 macros (which
 * would otherwise need a lot of arithmetic operations to simulate it).
 * Since most major ARM Linux distros are built for ARMv7, we depend much
 * less on runtime CPU detection, and conditional compilation here brings
 * practical benefits to a lot of users.
 */

#if defined(USE_GCC_INLINE_ASM) && defined(__arm__) && \
    !defined(__aarch64__) && (!defined(__thumb__) || defined(__thumb2__))
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \
    defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)

static force_inline uint32_t
un8x4_add_un8x4 (uint32_t x, uint32_t y)
{
    uint32_t t;
    asm ("uqadd8 %0, %1, %2" : "=r" (t) : "%r" (x), "r" (y));
    return t;
}

#define UN8x4_ADD_UN8x4(x, y) \
    ((x) = un8x4_add_un8x4 ((x), (y)))

#define UN8_rb_ADD_UN8_rb(x, y, t) \
    ((t) = un8x4_add_un8x4 ((x), (y)), (x) = (t))

#define ADD_UN8(x, y, t) \
    ((t) = (x), un8x4_add_un8x4 ((t), (y)))
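
/*
 * UQADD8 saturates each byte lane independently: for example
 * 0xf0f0f0f0 + 0x20202020 yields 0xffffffff instead of wrapping.
 * In the asm statement above, the "%" in the "%r" constraint marks
 * x and y as commutative, so the compiler may swap them when
 * allocating registers.
 */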

#endif
#endif

/*****************************************************************************/

/*
 * Helper macros.
 */

#define MUL_UN8(a, b, t) \
    ((t) = (a) * (uint16_t) (b) + ONE_HALF, ((((t) >> G_SHIFT) + (t)) >> G_SHIFT))
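
/*
 * MUL_UN8 computes a * b / 255 rounded to the nearest integer without a
 * division: with t = a * b + 0x80, the result is ((t >> 8) + t) >> 8.
 * E.g. a = b = 0xff gives t = 0xfe81, then (0xfe + 0xfe81) >> 8 = 0xff.
 */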

#define DIV_UN8(a, b) \
    (((uint16_t) (a) * MASK + ((b) / 2)) / (b))
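
/*
 * DIV_UN8 is the rounded inverse, a * 255 / b, as used e.g. when
 * unpremultiplying a channel by its alpha: a = 0x40, b = 0x80 gives
 * (0x40 * 0xff + 0x40) / 0x80 = 0x80.
 */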

#ifndef ADD_UN8
#define ADD_UN8(x, y, t) \
    ((t) = (x) + (y), \
     (uint32_t) (uint8_t) ((t) | (0 - ((t) >> G_SHIFT))))
#endif
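
/*
 * In the generic ADD_UN8, t can reach at most 0x1fe. If the sum
 * overflowed 8 bits, (t >> G_SHIFT) is 1 and (0 - 1) is all ones, so
 * the OR forces every bit; the cast to uint8_t then clamps the result
 * to 0xff.
 */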

#define DIV_ONE_UN8(x) \
    (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
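
/*
 * DIV_ONE_UN8 is the same round-to-nearest division by 255 as in
 * MUL_UN8, applied to a value that has already been accumulated
 * (typically a sum of 8-bit products).
 */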

/*
 * The methods below use some tricks to be able to do two color
 * components at the same time.
 */

/*
 * x_rb = (x_rb * a) / 255
 */
#define UN8_rb_MUL_UN8(x, a, t) \
    do \
    { \
        t = ((x) & RB_MASK) * (a); \
        t += RB_ONE_HALF; \
        x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
        x &= RB_MASK; \
    } while (0)
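
/*
 * The red and blue bytes sit 16 bits apart, so after masking with
 * RB_MASK each product a * channel (at most 0xfe01) fits in its own
 * 16-bit lane and both lanes share a single multiply. RB_ONE_HALF adds
 * the 0x80 rounding term to both lanes at once.
 */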

/*
 * x_rb = min (x_rb + y_rb, 255)
 */
#ifndef UN8_rb_ADD_UN8_rb
#define UN8_rb_ADD_UN8_rb(x, y, t) \
    do \
    { \
        t = ((x) + (y)); \
        t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
        x = (t & RB_MASK); \
    } while (0)
#endif
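
/*
 * Given rb-masked inputs, each lane of the sum holds at most 0x1fe, so
 * its ninth bit is the carry. (t >> G_SHIFT) & RB_MASK isolates that
 * carry per lane, and RB_MASK_PLUS_ONE minus the carries is 0xff in an
 * overflowed lane (saturating it via the OR) or 0x100 otherwise
 * (cleared again by the final mask).
 */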

/*
 * x_rb = (x_rb * a_rb) / 255
 */
#define UN8_rb_MUL_UN8_rb(x, a, t) \
    do \
    { \
        t = (x & MASK) * (a & MASK); \
        t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
        t += RB_ONE_HALF; \
        t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
        x = t & RB_MASK; \
    } while (0)
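
/*
 * Here each lane gets its own multiplier, so two multiplies are needed.
 * The two products occupy disjoint bit ranges (0-15 and 16-31), which
 * is why they can be merged with | instead of +; rounding and shifting
 * then proceed per lane exactly as above.
 */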

/*
 * x_c = (x_c * a) / 255
 */
#define UN8x4_MUL_UN8(x, a) \
    do \
    { \
        uint32_t r1__, r2__, t__; \
        \
        r1__ = (x); \
        UN8_rb_MUL_UN8 (r1__, (a), t__); \
        \
        r2__ = (x) >> G_SHIFT; \
        UN8_rb_MUL_UN8 (r2__, (a), t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)
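
/*
 * The full-pixel macros below all follow this pattern: split 0xAARRGGBB
 * into two rb-style halves, (r, b) in place and (a, g) shifted down by
 * G_SHIFT, run the two-lane helpers on each half, and stitch the result
 * back together with r1__ | (r2__ << G_SHIFT).
 */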

/*
 * x_c = (x_c * a) / 255 + y_c
 */
#define UN8x4_MUL_UN8_ADD_UN8x4(x, a, y) \
    do \
    { \
        uint32_t r1__, r2__, r3__, t__; \
        \
        r1__ = (x); \
        r2__ = (y) & RB_MASK; \
        UN8_rb_MUL_UN8 (r1__, (a), t__); \
        UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
        \
        r2__ = (x) >> G_SHIFT; \
        r3__ = ((y) >> G_SHIFT) & RB_MASK; \
        UN8_rb_MUL_UN8 (r2__, (a), t__); \
        UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)
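
/*
 * This shape is what operators like OVER need: e.g. with a set to the
 * inverted source alpha and y to the source pixel, it evaluates
 * dest = dest * (255 - src_a) / 255 + src. Note that y's halves are
 * only masked, never scaled, since y is added in as-is.
 */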

/*
 * x_c = (x_c * a + y_c * b) / 255
 */
#define UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8(x, a, y, b) \
    do \
    { \
        uint32_t r1__, r2__, r3__, t__; \
        \
        r1__ = (x); \
        r2__ = (y); \
        UN8_rb_MUL_UN8 (r1__, (a), t__); \
        UN8_rb_MUL_UN8 (r2__, (b), t__); \
        UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
        \
        r2__ = ((x) >> G_SHIFT); \
        r3__ = ((y) >> G_SHIFT); \
        UN8_rb_MUL_UN8 (r2__, (a), t__); \
        UN8_rb_MUL_UN8 (r3__, (b), t__); \
        UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)
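
/*
 * A weighted sum of two pixels; when a + b == 255 this is effectively a
 * linear interpolation between x and y (up to a possible off-by-one
 * from the two products being rounded separately).
 */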

/*
 * x_c = (x_c * a_c) / 255
 */
#define UN8x4_MUL_UN8x4(x, a) \
    do \
    { \
        uint32_t r1__, r2__, r3__, t__; \
        \
        r1__ = (x); \
        r2__ = (a); \
        UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \
        \
        r2__ = (x) >> G_SHIFT; \
        r3__ = (a) >> G_SHIFT; \
        UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)
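
/*
 * Component alpha: every channel of x is scaled by the corresponding
 * channel of a rather than by a single alpha value, as used with
 * per-channel masks (e.g. subpixel text rendering).
 */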

/*
 * x_c = (x_c * a_c) / 255 + y_c
 */
#define UN8x4_MUL_UN8x4_ADD_UN8x4(x, a, y) \
    do \
    { \
        uint32_t r1__, r2__, r3__, t__; \
        \
        r1__ = (x); \
        r2__ = (a); \
        UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \
        r2__ = (y) & RB_MASK; \
        UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
        \
        r2__ = ((x) >> G_SHIFT); \
        r3__ = ((a) >> G_SHIFT); \
        UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \
        r3__ = ((y) >> G_SHIFT) & RB_MASK; \
        UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)
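
/*
 * Component-alpha variant of UN8x4_MUL_UN8_ADD_UN8x4: only the multiply
 * step differs, with each half of x picking up the matching half of a;
 * the add halves are identical.
 */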

/*
 * x_c = (x_c * a_c + y_c * b) / 255
 */
#define UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8(x, a, y, b) \
    do \
    { \
        uint32_t r1__, r2__, r3__, t__; \
        \
        r1__ = (x); \
        r2__ = (a); \
        UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \
        r2__ = (y); \
        UN8_rb_MUL_UN8 (r2__, (b), t__); \
        UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
        \
        r2__ = (x) >> G_SHIFT; \
        r3__ = (a) >> G_SHIFT; \
        UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \
        r3__ = (y) >> G_SHIFT; \
        UN8_rb_MUL_UN8 (r3__, (b), t__); \
        UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
 * x_c = min (x_c + y_c, 255)
 */
#ifndef UN8x4_ADD_UN8x4
#define UN8x4_ADD_UN8x4(x, y) \
    do \
    { \
        uint32_t r1__, r2__, r3__, t__; \
        \
        r1__ = (x) & RB_MASK; \
        r2__ = (y) & RB_MASK; \
        UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
        \
        r2__ = ((x) >> G_SHIFT) & RB_MASK; \
        r3__ = ((y) >> G_SHIFT) & RB_MASK; \
        UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
        \
        (x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)
#endif
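
/*
 * Note: the #ifndef-guarded definitions of ADD_UN8, UN8_rb_ADD_UN8_rb
 * and UN8x4_ADD_UN8x4 above are portable fallbacks; on ARMv6/v7 builds
 * the UQADD8-based versions near the top of this file take precedence.
 */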
