michael@0: /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ michael@0: /* michael@0: * Copyright © 2000 SuSE, Inc. michael@0: * Copyright © 2007 Red Hat, Inc. michael@0: * michael@0: * Permission to use, copy, modify, distribute, and sell this software and its michael@0: * documentation for any purpose is hereby granted without fee, provided that michael@0: * the above copyright notice appear in all copies and that both that michael@0: * copyright notice and this permission notice appear in supporting michael@0: * documentation, and that the name of SuSE not be used in advertising or michael@0: * publicity pertaining to distribution of the software without specific, michael@0: * written prior permission. SuSE makes no representations about the michael@0: * suitability of this software for any purpose. It is provided "as is" michael@0: * without express or implied warranty. michael@0: * michael@0: * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL michael@0: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE michael@0: * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES michael@0: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION michael@0: * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN michael@0: * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. michael@0: * michael@0: * Author: Keith Packard, SuSE, Inc. michael@0: */ michael@0: michael@0: #ifndef PIXMAN_FAST_PATH_H__ michael@0: #define PIXMAN_FAST_PATH_H__ michael@0: michael@0: #include "pixman-private.h" michael@0: michael@0: #define PIXMAN_REPEAT_COVER -1 michael@0: michael@0: /* Flags describing input parameters to fast path macro template. michael@0: * Turning on some flag values may indicate that michael@0: * "some property X is available so template can use this" or michael@0: * "some property X should be handled by template". 
michael@0: * michael@0: * FLAG_HAVE_SOLID_MASK michael@0: * Input mask is solid so template should handle this. michael@0: * michael@0: * FLAG_HAVE_NON_SOLID_MASK michael@0: * Input mask is bits mask so template should handle this. michael@0: * michael@0: * FLAG_HAVE_SOLID_MASK and FLAG_HAVE_NON_SOLID_MASK are mutually michael@0: * exclusive. (It's not allowed to turn both flags on) michael@0: */ michael@0: #define FLAG_NONE (0) michael@0: #define FLAG_HAVE_SOLID_MASK (1 << 1) michael@0: #define FLAG_HAVE_NON_SOLID_MASK (1 << 2) michael@0: michael@0: /* To avoid too short repeated scanline function calls, extend source michael@0: * scanlines having width less than below constant value. michael@0: */ michael@0: #define REPEAT_NORMAL_MIN_WIDTH 64 michael@0: michael@0: static force_inline pixman_bool_t michael@0: repeat (pixman_repeat_t repeat, int *c, int size) michael@0: { michael@0: if (repeat == PIXMAN_REPEAT_NONE) michael@0: { michael@0: if (*c < 0 || *c >= size) michael@0: return FALSE; michael@0: } michael@0: else if (repeat == PIXMAN_REPEAT_NORMAL) michael@0: { michael@0: while (*c >= size) michael@0: *c -= size; michael@0: while (*c < 0) michael@0: *c += size; michael@0: } michael@0: else if (repeat == PIXMAN_REPEAT_PAD) michael@0: { michael@0: *c = CLIP (*c, 0, size - 1); michael@0: } michael@0: else /* REFLECT */ michael@0: { michael@0: *c = MOD (*c, size * 2); michael@0: if (*c >= size) michael@0: *c = size * 2 - *c - 1; michael@0: } michael@0: return TRUE; michael@0: } michael@0: michael@0: static force_inline int michael@0: pixman_fixed_to_bilinear_weight (pixman_fixed_t x) michael@0: { michael@0: return (x >> (16 - BILINEAR_INTERPOLATION_BITS)) & michael@0: ((1 << BILINEAR_INTERPOLATION_BITS) - 1); michael@0: } michael@0: michael@0: #if BILINEAR_INTERPOLATION_BITS <= 4 michael@0: /* Inspired by Filter_32_opaque from Skia */ michael@0: static force_inline uint32_t michael@0: bilinear_interpolation (uint32_t tl, uint32_t tr, michael@0: uint32_t bl, 
uint32_t br, michael@0: int distx, int disty) michael@0: { michael@0: int distxy, distxiy, distixy, distixiy; michael@0: uint32_t lo, hi; michael@0: michael@0: distx <<= (4 - BILINEAR_INTERPOLATION_BITS); michael@0: disty <<= (4 - BILINEAR_INTERPOLATION_BITS); michael@0: michael@0: distxy = distx * disty; michael@0: distxiy = (distx << 4) - distxy; /* distx * (16 - disty) */ michael@0: distixy = (disty << 4) - distxy; /* disty * (16 - distx) */ michael@0: distixiy = michael@0: 16 * 16 - (disty << 4) - michael@0: (distx << 4) + distxy; /* (16 - distx) * (16 - disty) */ michael@0: michael@0: lo = (tl & 0xff00ff) * distixiy; michael@0: hi = ((tl >> 8) & 0xff00ff) * distixiy; michael@0: michael@0: lo += (tr & 0xff00ff) * distxiy; michael@0: hi += ((tr >> 8) & 0xff00ff) * distxiy; michael@0: michael@0: lo += (bl & 0xff00ff) * distixy; michael@0: hi += ((bl >> 8) & 0xff00ff) * distixy; michael@0: michael@0: lo += (br & 0xff00ff) * distxy; michael@0: hi += ((br >> 8) & 0xff00ff) * distxy; michael@0: michael@0: return ((lo >> 8) & 0xff00ff) | (hi & ~0xff00ff); michael@0: } michael@0: michael@0: #else michael@0: #if SIZEOF_LONG > 4 michael@0: michael@0: static force_inline uint32_t michael@0: bilinear_interpolation (uint32_t tl, uint32_t tr, michael@0: uint32_t bl, uint32_t br, michael@0: int distx, int disty) michael@0: { michael@0: uint64_t distxy, distxiy, distixy, distixiy; michael@0: uint64_t tl64, tr64, bl64, br64; michael@0: uint64_t f, r; michael@0: michael@0: distx <<= (8 - BILINEAR_INTERPOLATION_BITS); michael@0: disty <<= (8 - BILINEAR_INTERPOLATION_BITS); michael@0: michael@0: distxy = distx * disty; michael@0: distxiy = distx * (256 - disty); michael@0: distixy = (256 - distx) * disty; michael@0: distixiy = (256 - distx) * (256 - disty); michael@0: michael@0: /* Alpha and Blue */ michael@0: tl64 = tl & 0xff0000ff; michael@0: tr64 = tr & 0xff0000ff; michael@0: bl64 = bl & 0xff0000ff; michael@0: br64 = br & 0xff0000ff; michael@0: michael@0: f = tl64 * distixiy + 
tr64 * distxiy + bl64 * distixy + br64 * distxy; michael@0: r = f & 0x0000ff0000ff0000ull; michael@0: michael@0: /* Red and Green */ michael@0: tl64 = tl; michael@0: tl64 = ((tl64 << 16) & 0x000000ff00000000ull) | (tl64 & 0x0000ff00ull); michael@0: michael@0: tr64 = tr; michael@0: tr64 = ((tr64 << 16) & 0x000000ff00000000ull) | (tr64 & 0x0000ff00ull); michael@0: michael@0: bl64 = bl; michael@0: bl64 = ((bl64 << 16) & 0x000000ff00000000ull) | (bl64 & 0x0000ff00ull); michael@0: michael@0: br64 = br; michael@0: br64 = ((br64 << 16) & 0x000000ff00000000ull) | (br64 & 0x0000ff00ull); michael@0: michael@0: f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy; michael@0: r |= ((f >> 16) & 0x000000ff00000000ull) | (f & 0xff000000ull); michael@0: michael@0: return (uint32_t)(r >> 16); michael@0: } michael@0: michael@0: #else michael@0: michael@0: #ifdef LOW_QUALITY_INTERPOLATION michael@0: /* Based on Filter_32_opaque_portable from Skia */ michael@0: static force_inline uint32_t michael@0: bilinear_interpolation(uint32_t a00, uint32_t a01, michael@0: uint32_t a10, uint32_t a11, michael@0: int x, int y) michael@0: { michael@0: int xy = x * y; michael@0: static const uint32_t mask = 0xff00ff; michael@0: michael@0: int scale = 256 - 16*y - 16*x + xy; michael@0: uint32_t lo = (a00 & mask) * scale; michael@0: uint32_t hi = ((a00 >> 8) & mask) * scale; michael@0: michael@0: scale = 16*x - xy; michael@0: lo += (a01 & mask) * scale; michael@0: hi += ((a01 >> 8) & mask) * scale; michael@0: michael@0: scale = 16*y - xy; michael@0: lo += (a10 & mask) * scale; michael@0: hi += ((a10 >> 8) & mask) * scale; michael@0: michael@0: lo += (a11 & mask) * xy; michael@0: hi += ((a11 >> 8) & mask) * xy; michael@0: michael@0: return ((lo >> 8) & mask) | (hi & ~mask); michael@0: } michael@0: #else michael@0: static force_inline uint32_t michael@0: bilinear_interpolation (uint32_t tl, uint32_t tr, michael@0: uint32_t bl, uint32_t br, michael@0: int distx, int disty) michael@0: { 
michael@0: int distxy, distxiy, distixy, distixiy; michael@0: uint32_t f, r; michael@0: michael@0: distx <<= (8 - BILINEAR_INTERPOLATION_BITS); michael@0: disty <<= (8 - BILINEAR_INTERPOLATION_BITS); michael@0: michael@0: distxy = distx * disty; michael@0: distxiy = (distx << 8) - distxy; /* distx * (256 - disty) */ michael@0: distixy = (disty << 8) - distxy; /* disty * (256 - distx) */ michael@0: distixiy = michael@0: 256 * 256 - (disty << 8) - michael@0: (distx << 8) + distxy; /* (256 - distx) * (256 - disty) */ michael@0: michael@0: /* Blue */ michael@0: r = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy michael@0: + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy; michael@0: michael@0: /* Green */ michael@0: f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy michael@0: + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy; michael@0: r |= f & 0xff000000; michael@0: michael@0: tl >>= 16; michael@0: tr >>= 16; michael@0: bl >>= 16; michael@0: br >>= 16; michael@0: r >>= 16; michael@0: michael@0: /* Red */ michael@0: f = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy michael@0: + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy; michael@0: r |= f & 0x00ff0000; michael@0: michael@0: /* Alpha */ michael@0: f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy michael@0: + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy; michael@0: r |= f & 0xff000000; michael@0: michael@0: return r; michael@0: } michael@0: #endif michael@0: #endif michael@0: #endif // BILINEAR_INTERPOLATION_BITS <= 4 michael@0: michael@0: /* michael@0: * For each scanline fetched from source image with PAD repeat: michael@0: * - calculate how many pixels need to be padded on the left side michael@0: * - calculate how many pixels need to be padded on the right side michael@0: * - update width to only count pixels which are fetched from the image michael@0: * All this information is returned via 'width', 'left_pad', 
'right_pad' michael@0: * arguments. The code is assuming that 'unit_x' is positive. michael@0: * michael@0: * Note: 64-bit math is used in order to avoid potential overflows, which michael@0: * is probably excessive in many cases. This particular function michael@0: * may need its own correctness test and performance tuning. michael@0: */ michael@0: static force_inline void michael@0: pad_repeat_get_scanline_bounds (int32_t source_image_width, michael@0: pixman_fixed_t vx, michael@0: pixman_fixed_t unit_x, michael@0: int32_t * width, michael@0: int32_t * left_pad, michael@0: int32_t * right_pad) michael@0: { michael@0: int64_t max_vx = (int64_t) source_image_width << 16; michael@0: int64_t tmp; michael@0: if (vx < 0) michael@0: { michael@0: tmp = ((int64_t) unit_x - 1 - vx) / unit_x; michael@0: if (tmp > *width) michael@0: { michael@0: *left_pad = *width; michael@0: *width = 0; michael@0: } michael@0: else michael@0: { michael@0: *left_pad = (int32_t) tmp; michael@0: *width -= (int32_t) tmp; michael@0: } michael@0: } michael@0: else michael@0: { michael@0: *left_pad = 0; michael@0: } michael@0: tmp = ((int64_t) unit_x - 1 - vx + max_vx) / unit_x - *left_pad; michael@0: if (tmp < 0) michael@0: { michael@0: *right_pad = *width; michael@0: *width = 0; michael@0: } michael@0: else if (tmp >= *width) michael@0: { michael@0: *right_pad = 0; michael@0: } michael@0: else michael@0: { michael@0: *right_pad = *width - (int32_t) tmp; michael@0: *width = (int32_t) tmp; michael@0: } michael@0: } michael@0: michael@0: /* A macroified version of specialized nearest scalers for some michael@0: * common 8888 and 565 formats. It supports SRC and OVER ops. michael@0: * michael@0: * There are two repeat versions, one that handles repeat normal, michael@0: * and one without repeat handling that only works if the src region michael@0: * used is completely covered by the pre-repeated source samples. 
michael@0: * michael@0: * The loops are unrolled to process two pixels per iteration for better michael@0: * performance on most CPU architectures (superscalar processors michael@0: * can issue several operations simultaneously, other processors can hide michael@0: * instructions latencies by pipelining operations). Unrolling more michael@0: * does not make much sense because the compiler will start running out michael@0: * of spare registers soon. michael@0: */ michael@0: michael@0: #define GET_8888_ALPHA(s) ((s) >> 24) michael@0: /* This is not actually used since we don't have an OVER with michael@0: 565 source, but it is needed to build. */ michael@0: #define GET_0565_ALPHA(s) 0xff michael@0: #define GET_x888_ALPHA(s) 0xff michael@0: michael@0: #define FAST_NEAREST_SCANLINE(scanline_func_name, SRC_FORMAT, DST_FORMAT, \ michael@0: src_type_t, dst_type_t, OP, repeat_mode) \ michael@0: static force_inline void \ michael@0: scanline_func_name (dst_type_t *dst, \ michael@0: const src_type_t *src, \ michael@0: int32_t w, \ michael@0: pixman_fixed_t vx, \ michael@0: pixman_fixed_t unit_x, \ michael@0: pixman_fixed_t src_width_fixed, \ michael@0: pixman_bool_t fully_transparent_src) \ michael@0: { \ michael@0: uint32_t d; \ michael@0: src_type_t s1, s2; \ michael@0: uint8_t a1, a2; \ michael@0: int x1, x2; \ michael@0: \ michael@0: if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER && fully_transparent_src) \ michael@0: return; \ michael@0: \ michael@0: if (PIXMAN_OP_ ## OP != PIXMAN_OP_SRC && PIXMAN_OP_ ## OP != PIXMAN_OP_OVER) \ michael@0: abort(); \ michael@0: \ michael@0: while ((w -= 2) >= 0) \ michael@0: { \ michael@0: x1 = pixman_fixed_to_int (vx); \ michael@0: vx += unit_x; \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ michael@0: { \ michael@0: /* This works because we know that unit_x is positive */ \ michael@0: while (vx >= 0) \ michael@0: vx -= src_width_fixed; \ michael@0: } \ michael@0: s1 = *(src + x1); \ michael@0: \ michael@0: x2 = 
pixman_fixed_to_int (vx); \ michael@0: vx += unit_x; \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ michael@0: { \ michael@0: /* This works because we know that unit_x is positive */ \ michael@0: while (vx >= 0) \ michael@0: vx -= src_width_fixed; \ michael@0: } \ michael@0: s2 = *(src + x2); \ michael@0: \ michael@0: if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \ michael@0: { \ michael@0: a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \ michael@0: a2 = GET_ ## SRC_FORMAT ## _ALPHA(s2); \ michael@0: \ michael@0: if (a1 == 0xff) \ michael@0: { \ michael@0: *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ michael@0: } \ michael@0: else if (s1) \ michael@0: { \ michael@0: d = convert_ ## DST_FORMAT ## _to_8888 (*dst); \ michael@0: s1 = convert_ ## SRC_FORMAT ## _to_8888 (s1); \ michael@0: a1 ^= 0xff; \ michael@0: UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \ michael@0: *dst = convert_8888_to_ ## DST_FORMAT (d); \ michael@0: } \ michael@0: dst++; \ michael@0: \ michael@0: if (a2 == 0xff) \ michael@0: { \ michael@0: *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s2); \ michael@0: } \ michael@0: else if (s2) \ michael@0: { \ michael@0: d = convert_## DST_FORMAT ## _to_8888 (*dst); \ michael@0: s2 = convert_## SRC_FORMAT ## _to_8888 (s2); \ michael@0: a2 ^= 0xff; \ michael@0: UN8x4_MUL_UN8_ADD_UN8x4 (d, a2, s2); \ michael@0: *dst = convert_8888_to_ ## DST_FORMAT (d); \ michael@0: } \ michael@0: dst++; \ michael@0: } \ michael@0: else /* PIXMAN_OP_SRC */ \ michael@0: { \ michael@0: *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ michael@0: *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s2); \ michael@0: } \ michael@0: } \ michael@0: \ michael@0: if (w & 1) \ michael@0: { \ michael@0: x1 = pixman_fixed_to_int (vx); \ michael@0: s1 = *(src + x1); \ michael@0: \ michael@0: if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \ michael@0: { \ michael@0: a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \ michael@0: \ michael@0: if (a1 == 0xff) \ 
michael@0: { \ michael@0: *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ michael@0: } \ michael@0: else if (s1) \ michael@0: { \ michael@0: d = convert_## DST_FORMAT ## _to_8888 (*dst); \ michael@0: s1 = convert_ ## SRC_FORMAT ## _to_8888 (s1); \ michael@0: a1 ^= 0xff; \ michael@0: UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \ michael@0: *dst = convert_8888_to_ ## DST_FORMAT (d); \ michael@0: } \ michael@0: dst++; \ michael@0: } \ michael@0: else /* PIXMAN_OP_SRC */ \ michael@0: { \ michael@0: *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ michael@0: } \ michael@0: } \ michael@0: } michael@0: michael@0: #define FAST_NEAREST_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, mask_type_t, \ michael@0: dst_type_t, repeat_mode, have_mask, mask_is_solid) \ michael@0: static void \ michael@0: fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp, \ michael@0: pixman_composite_info_t *info) \ michael@0: { \ michael@0: PIXMAN_COMPOSITE_ARGS (info); \ michael@0: dst_type_t *dst_line; \ michael@0: mask_type_t *mask_line; \ michael@0: src_type_t *src_first_line; \ michael@0: int y; \ michael@0: pixman_fixed_t src_width_fixed = pixman_int_to_fixed (src_image->bits.width); \ michael@0: pixman_fixed_t max_vy; \ michael@0: pixman_vector_t v; \ michael@0: pixman_fixed_t vx, vy; \ michael@0: pixman_fixed_t unit_x, unit_y; \ michael@0: int32_t left_pad, right_pad; \ michael@0: \ michael@0: src_type_t *src; \ michael@0: dst_type_t *dst; \ michael@0: mask_type_t solid_mask; \ michael@0: const mask_type_t *mask = &solid_mask; \ michael@0: int src_stride, mask_stride, dst_stride; \ michael@0: \ michael@0: PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type_t, dst_stride, dst_line, 1); \ michael@0: if (have_mask) \ michael@0: { \ michael@0: if (mask_is_solid) \ michael@0: solid_mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); \ michael@0: else \ michael@0: PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, 
mask_y, mask_type_t, \ michael@0: mask_stride, mask_line, 1); \ michael@0: } \ michael@0: /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \ michael@0: * transformed from destination space to source space */ \ michael@0: PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \ michael@0: \ michael@0: /* reference point is the center of the pixel */ \ michael@0: v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \ michael@0: v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \ michael@0: v.vector[2] = pixman_fixed_1; \ michael@0: \ michael@0: if (!pixman_transform_point_3d (src_image->common.transform, &v)) \ michael@0: return; \ michael@0: \ michael@0: unit_x = src_image->common.transform->matrix[0][0]; \ michael@0: unit_y = src_image->common.transform->matrix[1][1]; \ michael@0: \ michael@0: /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ \ michael@0: v.vector[0] -= pixman_fixed_e; \ michael@0: v.vector[1] -= pixman_fixed_e; \ michael@0: \ michael@0: vx = v.vector[0]; \ michael@0: vy = v.vector[1]; \ michael@0: \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ michael@0: { \ michael@0: max_vy = pixman_int_to_fixed (src_image->bits.height); \ michael@0: \ michael@0: /* Clamp repeating positions inside the actual samples */ \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \ michael@0: } \ michael@0: \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD || \ michael@0: PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ michael@0: { \ michael@0: pad_repeat_get_scanline_bounds (src_image->bits.width, vx, unit_x, \ michael@0: &width, &left_pad, &right_pad); \ michael@0: vx += left_pad * unit_x; \ michael@0: } \ michael@0: \ michael@0: while (--height >= 0) \ michael@0: { \ michael@0: dst = dst_line; \ michael@0: dst_line += dst_stride; \ michael@0: if 
(have_mask && !mask_is_solid) \ michael@0: { \ michael@0: mask = mask_line; \ michael@0: mask_line += mask_stride; \ michael@0: } \ michael@0: \ michael@0: y = pixman_fixed_to_int (vy); \ michael@0: vy += unit_y; \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \ michael@0: { \ michael@0: repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height); \ michael@0: src = src_first_line + src_stride * y; \ michael@0: if (left_pad > 0) \ michael@0: { \ michael@0: scanline_func (mask, dst, \ michael@0: src + src_image->bits.width - src_image->bits.width + 1, \ michael@0: left_pad, -pixman_fixed_e, 0, src_width_fixed, FALSE); \ michael@0: } \ michael@0: if (width > 0) \ michael@0: { \ michael@0: scanline_func (mask + (mask_is_solid ? 0 : left_pad), \ michael@0: dst + left_pad, src + src_image->bits.width, width, \ michael@0: vx - src_width_fixed, unit_x, src_width_fixed, FALSE); \ michael@0: } \ michael@0: if (right_pad > 0) \ michael@0: { \ michael@0: scanline_func (mask + (mask_is_solid ? 
0 : left_pad + width), \ michael@0: dst + left_pad + width, src + src_image->bits.width, \ michael@0: right_pad, -pixman_fixed_e, 0, src_width_fixed, FALSE); \ michael@0: } \ michael@0: } \ michael@0: else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ michael@0: { \ michael@0: static const src_type_t zero[1] = { 0 }; \ michael@0: if (y < 0 || y >= src_image->bits.height) \ michael@0: { \ michael@0: scanline_func (mask, dst, zero + 1, left_pad + width + right_pad, \ michael@0: -pixman_fixed_e, 0, src_width_fixed, TRUE); \ michael@0: continue; \ michael@0: } \ michael@0: src = src_first_line + src_stride * y; \ michael@0: if (left_pad > 0) \ michael@0: { \ michael@0: scanline_func (mask, dst, zero + 1, left_pad, \ michael@0: -pixman_fixed_e, 0, src_width_fixed, TRUE); \ michael@0: } \ michael@0: if (width > 0) \ michael@0: { \ michael@0: scanline_func (mask + (mask_is_solid ? 0 : left_pad), \ michael@0: dst + left_pad, src + src_image->bits.width, width, \ michael@0: vx - src_width_fixed, unit_x, src_width_fixed, FALSE); \ michael@0: } \ michael@0: if (right_pad > 0) \ michael@0: { \ michael@0: scanline_func (mask + (mask_is_solid ? 
0 : left_pad + width), \ michael@0: dst + left_pad + width, zero + 1, right_pad, \ michael@0: -pixman_fixed_e, 0, src_width_fixed, TRUE); \ michael@0: } \ michael@0: } \ michael@0: else \ michael@0: { \ michael@0: src = src_first_line + src_stride * y; \ michael@0: scanline_func (mask, dst, src + src_image->bits.width, width, vx - src_width_fixed, \ michael@0: unit_x, src_width_fixed, FALSE); \ michael@0: } \ michael@0: } \ michael@0: } michael@0: michael@0: /* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */ michael@0: #define FAST_NEAREST_MAINLOOP_COMMON(scale_func_name, scanline_func, src_type_t, mask_type_t, \ michael@0: dst_type_t, repeat_mode, have_mask, mask_is_solid) \ michael@0: FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, mask_type_t, \ michael@0: dst_type_t, repeat_mode, have_mask, mask_is_solid) michael@0: michael@0: #define FAST_NEAREST_MAINLOOP_NOMASK(scale_func_name, scanline_func, src_type_t, dst_type_t, \ michael@0: repeat_mode) \ michael@0: static force_inline void \ michael@0: scanline_func##scale_func_name##_wrapper ( \ michael@0: const uint8_t *mask, \ michael@0: dst_type_t *dst, \ michael@0: const src_type_t *src, \ michael@0: int32_t w, \ michael@0: pixman_fixed_t vx, \ michael@0: pixman_fixed_t unit_x, \ michael@0: pixman_fixed_t max_vx, \ michael@0: pixman_bool_t fully_transparent_src) \ michael@0: { \ michael@0: scanline_func (dst, src, w, vx, unit_x, max_vx, fully_transparent_src); \ michael@0: } \ michael@0: FAST_NEAREST_MAINLOOP_INT (scale_func_name, scanline_func##scale_func_name##_wrapper, \ michael@0: src_type_t, uint8_t, dst_type_t, repeat_mode, FALSE, FALSE) michael@0: michael@0: #define FAST_NEAREST_MAINLOOP(scale_func_name, scanline_func, src_type_t, dst_type_t, \ michael@0: repeat_mode) \ michael@0: FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name, scanline_func, src_type_t, \ michael@0: dst_type_t, repeat_mode) michael@0: michael@0: #define 
FAST_NEAREST(scale_func_name, SRC_FORMAT, DST_FORMAT, \ michael@0: src_type_t, dst_type_t, OP, repeat_mode) \ michael@0: FAST_NEAREST_SCANLINE(scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \ michael@0: SRC_FORMAT, DST_FORMAT, src_type_t, dst_type_t, \ michael@0: OP, repeat_mode) \ michael@0: FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name ## _ ## OP, \ michael@0: scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \ michael@0: src_type_t, dst_type_t, repeat_mode) michael@0: michael@0: michael@0: #define SCALED_NEAREST_FLAGS \ michael@0: (FAST_PATH_SCALE_TRANSFORM | \ michael@0: FAST_PATH_NO_ALPHA_MAP | \ michael@0: FAST_PATH_NEAREST_FILTER | \ michael@0: FAST_PATH_NO_ACCESSORS | \ michael@0: FAST_PATH_NARROW_FORMAT) michael@0: michael@0: #define SIMPLE_NEAREST_FAST_PATH_NORMAL(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_NORMAL_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_FAST_PATH_PAD(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_PAD_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_FAST_PATH_NONE(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_NONE_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \ michael@0: } 
michael@0: michael@0: #define SIMPLE_NEAREST_FAST_PATH_COVER(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NORMAL(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_NORMAL_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_PAD_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_NONE_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \ michael@0: 
PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_NORMAL_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_PAD_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_NEAREST_FLAGS | \ michael@0: FAST_PATH_NONE_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: 
fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \ michael@0: } michael@0: michael@0: /* Prefer the use of 'cover' variant, because it is faster */ michael@0: #define SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \ michael@0: SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func), \ michael@0: SIMPLE_NEAREST_FAST_PATH_NONE (op,s,d,func), \ michael@0: SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func), \ michael@0: SIMPLE_NEAREST_FAST_PATH_NORMAL (op,s,d,func) michael@0: michael@0: #define SIMPLE_NEAREST_A8_MASK_FAST_PATH(op,s,d,func) \ michael@0: SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER (op,s,d,func), \ michael@0: SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE (op,s,d,func), \ michael@0: SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD (op,s,d,func) michael@0: michael@0: #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH(op,s,d,func) \ michael@0: SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER (op,s,d,func), \ michael@0: SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE (op,s,d,func), \ michael@0: SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD (op,s,d,func) michael@0: michael@0: /*****************************************************************************/ michael@0: michael@0: /* michael@0: * Identify 5 zones in each scanline for bilinear scaling. Depending on michael@0: * whether 2 pixels to be interpolated are fetched from the image itself, michael@0: * from the padding area around it or from both image and padding area. 
michael@0: */ michael@0: static force_inline void michael@0: bilinear_pad_repeat_get_scanline_bounds (int32_t source_image_width, michael@0: pixman_fixed_t vx, michael@0: pixman_fixed_t unit_x, michael@0: int32_t * left_pad, michael@0: int32_t * left_tz, michael@0: int32_t * width, michael@0: int32_t * right_tz, michael@0: int32_t * right_pad) michael@0: { michael@0: int width1 = *width, left_pad1, right_pad1; michael@0: int width2 = *width, left_pad2, right_pad2; michael@0: michael@0: pad_repeat_get_scanline_bounds (source_image_width, vx, unit_x, michael@0: &width1, &left_pad1, &right_pad1); michael@0: pad_repeat_get_scanline_bounds (source_image_width, vx + pixman_fixed_1, michael@0: unit_x, &width2, &left_pad2, &right_pad2); michael@0: michael@0: *left_pad = left_pad2; michael@0: *left_tz = left_pad1 - left_pad2; michael@0: *right_tz = right_pad2 - right_pad1; michael@0: *right_pad = right_pad1; michael@0: *width -= *left_pad + *left_tz + *right_tz + *right_pad; michael@0: } michael@0: michael@0: /* michael@0: * Main loop template for single pass bilinear scaling. It needs to be michael@0: * provided with 'scanline_func' which should do the compositing operation. 
 * The needed function has the following prototype:
 *
 *     scanline_func (dst_type_t *       dst,
 *                    const mask_type_t *mask,
 *                    const src_type_t * src_top,
 *                    const src_type_t * src_bottom,
 *                    int32_t            width,
 *                    int                weight_top,
 *                    int                weight_bottom,
 *                    pixman_fixed_t     vx,
 *                    pixman_fixed_t     unit_x,
 *                    pixman_fixed_t     max_vx,
 *                    pixman_bool_t      zero_src)
 *
 * Where:
 *  dst                 - destination scanline buffer for storing results
 *  mask                - mask buffer (or single value for solid mask)
 *  src_top, src_bottom - two source scanlines
 *  width               - number of pixels to process
 *  weight_top          - weight of the top row for interpolation
 *  weight_bottom       - weight of the bottom row for interpolation
 *  vx                  - initial position for fetching the first pair of
 *                        pixels from the source buffer
 *  unit_x              - position increment needed to move to the next pair
 *                        of pixels
 *  max_vx              - image size as a fixed point value, can be used for
 *                        implementing NORMAL repeat (when it is supported)
 *  zero_src            - boolean hint variable, which is set to TRUE when
 *                        all source pixels are fetched from zero padding
 *                        zone for NONE repeat
 *
 * Note: normally the sum of 'weight_top' and 'weight_bottom' is equal to
 *       BILINEAR_INTERPOLATION_RANGE, but sometimes it may be less than that
 *       for NONE repeat when handling fuzzy antialiased top or bottom image
 *       edges. Also both top and bottom weight variables are guaranteed to
 *       have value, which is less than BILINEAR_INTERPOLATION_RANGE.
 *       For example, the weights can fit into unsigned byte or be used
 *       with 8-bit SIMD multiplication instructions for 8-bit interpolation
 *       precision.
michael@0: */ michael@0: michael@0: /* Replace a single "scanline_func" with "fetch_func" & "op_func" to allow optional michael@0: * two stage processing (bilinear fetch to a temp buffer, followed by unscaled michael@0: * combine), "op_func" may be NULL, in this case we keep old behavior. michael@0: * This is ugly and gcc issues some warnings, but works. michael@0: * michael@0: * An advice: clang has much better error reporting than gcc for deeply nested macros. michael@0: */ michael@0: michael@0: #define scanline_func(dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buf, mask, src_top, src_bottom, width, \ michael@0: weight_top, weight_bottom, vx, unit_x, max_vx, zero_src) \ michael@0: do { \ michael@0: if (op_func != NULL) \ michael@0: { \ michael@0: fetch_func ((void *)scanline_buf, (mask), (src_top), (src_bottom), (width), \ michael@0: (weight_top), (weight_bottom), (vx), (unit_x), (max_vx), (zero_src)); \ michael@0: ((void (*)(dst_type_t *, const mask_type_t *, const src_type_t *, int)) op_func)\ michael@0: ((dst), (mask), (src_type_t *)scanline_buf, (width)); \ michael@0: } \ michael@0: else \ michael@0: { \ michael@0: fetch_func ((void*)(dst), (mask), (src_top), (src_bottom), (width), (weight_top), \ michael@0: (weight_bottom), (vx), (unit_x), (max_vx), (zero_src)); \ michael@0: } \ michael@0: } while (0) michael@0: michael@0: michael@0: #define SCANLINE_BUFFER_LENGTH 3072 michael@0: michael@0: #define FAST_BILINEAR_MAINLOOP_INT(scale_func_name, fetch_func, op_func, src_type_t, \ michael@0: mask_type_t, dst_type_t, repeat_mode, flags) \ michael@0: static void \ michael@0: fast_composite_scaled_bilinear ## scale_func_name (pixman_implementation_t *imp, \ michael@0: pixman_composite_info_t *info) \ michael@0: { \ michael@0: PIXMAN_COMPOSITE_ARGS (info); \ michael@0: dst_type_t *dst_line; \ michael@0: mask_type_t *mask_line; \ michael@0: src_type_t *src_first_line; \ michael@0: int y1, y2; \ michael@0: pixman_fixed_t max_vx = 
INT32_MAX; /* suppress uninitialized variable warning */ \ michael@0: pixman_vector_t v; \ michael@0: pixman_fixed_t vx, vy; \ michael@0: pixman_fixed_t unit_x, unit_y; \ michael@0: int32_t left_pad, left_tz, right_tz, right_pad; \ michael@0: \ michael@0: dst_type_t *dst; \ michael@0: mask_type_t solid_mask; \ michael@0: const mask_type_t *mask = &solid_mask; \ michael@0: int src_stride, mask_stride, dst_stride; \ michael@0: \ michael@0: int src_width; \ michael@0: pixman_fixed_t src_width_fixed; \ michael@0: int max_x; \ michael@0: pixman_bool_t need_src_extension; \ michael@0: \ michael@0: uint64_t stack_scanline_buffer[SCANLINE_BUFFER_LENGTH]; \ michael@0: uint8_t *scanline_buffer = (uint8_t *) stack_scanline_buffer; \ michael@0: \ michael@0: PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type_t, dst_stride, dst_line, 1); \ michael@0: if (flags & FLAG_HAVE_SOLID_MASK) \ michael@0: { \ michael@0: solid_mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); \ michael@0: mask_stride = 0; \ michael@0: } \ michael@0: else if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: { \ michael@0: PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type_t, \ michael@0: mask_stride, mask_line, 1); \ michael@0: } \ michael@0: \ michael@0: /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \ michael@0: * transformed from destination space to source space */ \ michael@0: PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \ michael@0: \ michael@0: /* reference point is the center of the pixel */ \ michael@0: v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \ michael@0: v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \ michael@0: v.vector[2] = pixman_fixed_1; \ michael@0: \ michael@0: if (!pixman_transform_point_3d (src_image->common.transform, &v)) \ michael@0: return; \ michael@0: \ michael@0: unit_x = src_image->common.transform->matrix[0][0]; \ michael@0: unit_y 
= src_image->common.transform->matrix[1][1]; \ michael@0: \ michael@0: v.vector[0] -= pixman_fixed_1 / 2; \ michael@0: v.vector[1] -= pixman_fixed_1 / 2; \ michael@0: \ michael@0: vy = v.vector[1]; \ michael@0: \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD || \ michael@0: PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ michael@0: { \ michael@0: bilinear_pad_repeat_get_scanline_bounds (src_image->bits.width, v.vector[0], unit_x, \ michael@0: &left_pad, &left_tz, &width, &right_tz, &right_pad); \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \ michael@0: { \ michael@0: /* PAD repeat does not need special handling for 'transition zones' and */ \ michael@0: /* they can be combined with 'padding zones' safely */ \ michael@0: left_pad += left_tz; \ michael@0: right_pad += right_tz; \ michael@0: left_tz = right_tz = 0; \ michael@0: } \ michael@0: v.vector[0] += left_pad * unit_x; \ michael@0: } \ michael@0: \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ michael@0: { \ michael@0: vx = v.vector[0]; \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &vx, pixman_int_to_fixed(src_image->bits.width)); \ michael@0: max_x = pixman_fixed_to_int (vx + (width - 1) * (int64_t)unit_x) + 1; \ michael@0: \ michael@0: if (src_image->bits.width < REPEAT_NORMAL_MIN_WIDTH) \ michael@0: { \ michael@0: src_width = 0; \ michael@0: \ michael@0: while (src_width < REPEAT_NORMAL_MIN_WIDTH && src_width <= max_x) \ michael@0: src_width += src_image->bits.width; \ michael@0: \ michael@0: need_src_extension = TRUE; \ michael@0: } \ michael@0: else \ michael@0: { \ michael@0: src_width = src_image->bits.width; \ michael@0: need_src_extension = FALSE; \ michael@0: } \ michael@0: \ michael@0: src_width_fixed = pixman_int_to_fixed (src_width); \ michael@0: } \ michael@0: \ michael@0: if (op_func != NULL && width * sizeof(src_type_t) > sizeof(stack_scanline_buffer)) \ michael@0: { \ michael@0: scanline_buffer = pixman_malloc_ab (width, 
sizeof(src_type_t)); \ michael@0: \ michael@0: if (!scanline_buffer) \ michael@0: return; \ michael@0: } \ michael@0: \ michael@0: while (--height >= 0) \ michael@0: { \ michael@0: int weight1, weight2; \ michael@0: dst = dst_line; \ michael@0: dst_line += dst_stride; \ michael@0: vx = v.vector[0]; \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: { \ michael@0: mask = mask_line; \ michael@0: mask_line += mask_stride; \ michael@0: } \ michael@0: \ michael@0: y1 = pixman_fixed_to_int (vy); \ michael@0: weight2 = pixman_fixed_to_bilinear_weight (vy); \ michael@0: if (weight2) \ michael@0: { \ michael@0: /* both weight1 and weight2 are smaller than BILINEAR_INTERPOLATION_RANGE */ \ michael@0: y2 = y1 + 1; \ michael@0: weight1 = BILINEAR_INTERPOLATION_RANGE - weight2; \ michael@0: } \ michael@0: else \ michael@0: { \ michael@0: /* set both top and bottom row to the same scanline and tweak weights */ \ michael@0: y2 = y1; \ michael@0: weight1 = weight2 = BILINEAR_INTERPOLATION_RANGE / 2; \ michael@0: } \ michael@0: vy += unit_y; \ michael@0: if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \ michael@0: { \ michael@0: src_type_t *src1, *src2; \ michael@0: src_type_t buf1[2]; \ michael@0: src_type_t buf2[2]; \ michael@0: repeat (PIXMAN_REPEAT_PAD, &y1, src_image->bits.height); \ michael@0: repeat (PIXMAN_REPEAT_PAD, &y2, src_image->bits.height); \ michael@0: src1 = src_first_line + src_stride * y1; \ michael@0: src2 = src_first_line + src_stride * y2; \ michael@0: \ michael@0: if (left_pad > 0) \ michael@0: { \ michael@0: buf1[0] = buf1[1] = src1[0]; \ michael@0: buf2[0] = buf2[1] = src2[0]; \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, buf1, buf2, left_pad, weight1, weight2, \ michael@0: 0, 0, 0, FALSE); \ michael@0: dst += left_pad; \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += left_pad; \ michael@0: } \ michael@0: if (width > 0) \ michael@0: { 
\ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, src1, src2, width, weight1, weight2, \ michael@0: vx, unit_x, 0, FALSE); \ michael@0: dst += width; \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += width; \ michael@0: } \ michael@0: if (right_pad > 0) \ michael@0: { \ michael@0: buf1[0] = buf1[1] = src1[src_image->bits.width - 1]; \ michael@0: buf2[0] = buf2[1] = src2[src_image->bits.width - 1]; \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, buf1, buf2, right_pad, weight1, weight2, \ michael@0: 0, 0, 0, FALSE); \ michael@0: } \ michael@0: } \ michael@0: else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ michael@0: { \ michael@0: src_type_t *src1, *src2; \ michael@0: src_type_t buf1[2]; \ michael@0: src_type_t buf2[2]; \ michael@0: /* handle top/bottom zero padding by just setting weights to 0 if needed */ \ michael@0: if (y1 < 0) \ michael@0: { \ michael@0: weight1 = 0; \ michael@0: y1 = 0; \ michael@0: } \ michael@0: if (y1 >= src_image->bits.height) \ michael@0: { \ michael@0: weight1 = 0; \ michael@0: y1 = src_image->bits.height - 1; \ michael@0: } \ michael@0: if (y2 < 0) \ michael@0: { \ michael@0: weight2 = 0; \ michael@0: y2 = 0; \ michael@0: } \ michael@0: if (y2 >= src_image->bits.height) \ michael@0: { \ michael@0: weight2 = 0; \ michael@0: y2 = src_image->bits.height - 1; \ michael@0: } \ michael@0: src1 = src_first_line + src_stride * y1; \ michael@0: src2 = src_first_line + src_stride * y2; \ michael@0: \ michael@0: if (left_pad > 0) \ michael@0: { \ michael@0: buf1[0] = buf1[1] = 0; \ michael@0: buf2[0] = buf2[1] = 0; \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, buf1, buf2, left_pad, weight1, weight2, \ michael@0: 0, 0, 0, TRUE); \ michael@0: dst += left_pad; \ michael@0: if 
(flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += left_pad; \ michael@0: } \ michael@0: if (left_tz > 0) \ michael@0: { \ michael@0: buf1[0] = 0; \ michael@0: buf1[1] = src1[0]; \ michael@0: buf2[0] = 0; \ michael@0: buf2[1] = src2[0]; \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, buf1, buf2, left_tz, weight1, weight2, \ michael@0: pixman_fixed_frac (vx), unit_x, 0, FALSE); \ michael@0: dst += left_tz; \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += left_tz; \ michael@0: vx += left_tz * unit_x; \ michael@0: } \ michael@0: if (width > 0) \ michael@0: { \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, src1, src2, width, weight1, weight2, \ michael@0: vx, unit_x, 0, FALSE); \ michael@0: dst += width; \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += width; \ michael@0: vx += width * unit_x; \ michael@0: } \ michael@0: if (right_tz > 0) \ michael@0: { \ michael@0: buf1[0] = src1[src_image->bits.width - 1]; \ michael@0: buf1[1] = 0; \ michael@0: buf2[0] = src2[src_image->bits.width - 1]; \ michael@0: buf2[1] = 0; \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, buf1, buf2, right_tz, weight1, weight2, \ michael@0: pixman_fixed_frac (vx), unit_x, 0, FALSE); \ michael@0: dst += right_tz; \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += right_tz; \ michael@0: } \ michael@0: if (right_pad > 0) \ michael@0: { \ michael@0: buf1[0] = buf1[1] = 0; \ michael@0: buf2[0] = buf2[1] = 0; \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, buf1, buf2, right_pad, weight1, weight2, \ michael@0: 0, 0, 0, TRUE); \ michael@0: } \ michael@0: } \ michael@0: else if (PIXMAN_REPEAT_ ## repeat_mode == 
PIXMAN_REPEAT_NORMAL) \ michael@0: { \ michael@0: int32_t num_pixels; \ michael@0: int32_t width_remain; \ michael@0: src_type_t * src_line_top; \ michael@0: src_type_t * src_line_bottom; \ michael@0: src_type_t buf1[2]; \ michael@0: src_type_t buf2[2]; \ michael@0: src_type_t extended_src_line0[REPEAT_NORMAL_MIN_WIDTH*2]; \ michael@0: src_type_t extended_src_line1[REPEAT_NORMAL_MIN_WIDTH*2]; \ michael@0: int i, j; \ michael@0: \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &y1, src_image->bits.height); \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &y2, src_image->bits.height); \ michael@0: src_line_top = src_first_line + src_stride * y1; \ michael@0: src_line_bottom = src_first_line + src_stride * y2; \ michael@0: \ michael@0: if (need_src_extension) \ michael@0: { \ michael@0: for (i=0; ibits.width; j++, i++) \ michael@0: { \ michael@0: extended_src_line0[i] = src_line_top[j]; \ michael@0: extended_src_line1[i] = src_line_bottom[j]; \ michael@0: } \ michael@0: } \ michael@0: \ michael@0: src_line_top = &extended_src_line0[0]; \ michael@0: src_line_bottom = &extended_src_line1[0]; \ michael@0: } \ michael@0: \ michael@0: /* Top & Bottom wrap around buffer */ \ michael@0: buf1[0] = src_line_top[src_width - 1]; \ michael@0: buf1[1] = src_line_top[0]; \ michael@0: buf2[0] = src_line_bottom[src_width - 1]; \ michael@0: buf2[1] = src_line_bottom[0]; \ michael@0: \ michael@0: width_remain = width; \ michael@0: \ michael@0: while (width_remain > 0) \ michael@0: { \ michael@0: /* We use src_width_fixed because it can make vx in original source range */ \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \ michael@0: \ michael@0: /* Wrap around part */ \ michael@0: if (pixman_fixed_to_int (vx) == src_width - 1) \ michael@0: { \ michael@0: /* for positive unit_x \ michael@0: * num_pixels = max(n) + 1, where vx + n*unit_x < src_width_fixed \ michael@0: * \ michael@0: * vx is in range [0, src_width_fixed - pixman_fixed_e] \ michael@0: * So we are safe from overflow. 
\ michael@0: */ \ michael@0: num_pixels = ((src_width_fixed - vx - pixman_fixed_e) / unit_x) + 1; \ michael@0: \ michael@0: if (num_pixels > width_remain) \ michael@0: num_pixels = width_remain; \ michael@0: \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, \ michael@0: dst, scanline_buffer, mask, buf1, buf2, num_pixels, \ michael@0: weight1, weight2, pixman_fixed_frac(vx), \ michael@0: unit_x, src_width_fixed, FALSE); \ michael@0: \ michael@0: width_remain -= num_pixels; \ michael@0: vx += num_pixels * unit_x; \ michael@0: dst += num_pixels; \ michael@0: \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += num_pixels; \ michael@0: \ michael@0: repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \ michael@0: } \ michael@0: \ michael@0: /* Normal scanline composite */ \ michael@0: if (pixman_fixed_to_int (vx) != src_width - 1 && width_remain > 0) \ michael@0: { \ michael@0: /* for positive unit_x \ michael@0: * num_pixels = max(n) + 1, where vx + n*unit_x < (src_width_fixed - 1) \ michael@0: * \ michael@0: * vx is in range [0, src_width_fixed - pixman_fixed_e] \ michael@0: * So we are safe from overflow here. 
\ michael@0: */ \ michael@0: num_pixels = ((src_width_fixed - pixman_fixed_1 - vx - pixman_fixed_e) \ michael@0: / unit_x) + 1; \ michael@0: \ michael@0: if (num_pixels > width_remain) \ michael@0: num_pixels = width_remain; \ michael@0: \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, \ michael@0: dst, scanline_buffer, mask, src_line_top, src_line_bottom, \ michael@0: num_pixels, weight1, weight2, vx, unit_x, src_width_fixed, \ michael@0: FALSE); \ michael@0: \ michael@0: width_remain -= num_pixels; \ michael@0: vx += num_pixels * unit_x; \ michael@0: dst += num_pixels; \ michael@0: \ michael@0: if (flags & FLAG_HAVE_NON_SOLID_MASK) \ michael@0: mask += num_pixels; \ michael@0: } \ michael@0: } \ michael@0: } \ michael@0: else \ michael@0: { \ michael@0: scanline_func (dst_type_t, mask_type_t, src_type_t, fetch_func, op_func, dst, \ michael@0: scanline_buffer, mask, \ michael@0: src_first_line + src_stride * y1, \ michael@0: src_first_line + src_stride * y2, width, \ michael@0: weight1, weight2, vx, unit_x, max_vx, FALSE); \ michael@0: } \ michael@0: } \ michael@0: if (scanline_buffer != (uint8_t *) stack_scanline_buffer) \ michael@0: free (scanline_buffer); \ michael@0: } michael@0: michael@0: /* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */ michael@0: #define FAST_BILINEAR_MAINLOOP_COMMON(scale_func_name, fetch_func, op_func, src_type_t, mask_type_t,\ michael@0: dst_type_t, repeat_mode, flags) \ michael@0: FAST_BILINEAR_MAINLOOP_INT(_ ## scale_func_name, fetch_func, op_func, src_type_t, mask_type_t,\ michael@0: dst_type_t, repeat_mode, flags) michael@0: michael@0: #define SCALED_BILINEAR_FLAGS \ michael@0: (FAST_PATH_SCALE_TRANSFORM | \ michael@0: FAST_PATH_NO_ALPHA_MAP | \ michael@0: FAST_PATH_BILINEAR_FILTER | \ michael@0: FAST_PATH_NO_ACCESSORS | \ michael@0: FAST_PATH_NARROW_FORMAT) michael@0: michael@0: #define SIMPLE_BILINEAR_FAST_PATH_PAD(op,s,d,func) \ michael@0: { 
PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_PAD_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_FAST_PATH_NONE(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_NONE_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_FAST_PATH_COVER(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_FAST_PATH_NORMAL(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_NORMAL_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_null, 0, \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_PAD(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_PAD_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _pad ## 
_ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NONE(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_NONE_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_COVER(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NORMAL(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_NORMAL_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_PAD(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_PAD_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NONE(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: 
PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_NONE_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_COVER(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op, \ michael@0: } michael@0: michael@0: #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func) \ michael@0: { PIXMAN_OP_ ## op, \ michael@0: PIXMAN_ ## s, \ michael@0: (SCALED_BILINEAR_FLAGS | \ michael@0: FAST_PATH_NORMAL_REPEAT | \ michael@0: FAST_PATH_X_UNIT_POSITIVE), \ michael@0: PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ michael@0: PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ michael@0: fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op, \ michael@0: } michael@0: michael@0: /* Prefer the use of 'cover' variant, because it is faster */ michael@0: #define SIMPLE_BILINEAR_FAST_PATH(op,s,d,func) \ michael@0: SIMPLE_BILINEAR_FAST_PATH_COVER (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_FAST_PATH_NONE (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_FAST_PATH_PAD (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_FAST_PATH_NORMAL (op,s,d,func) michael@0: michael@0: #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH(op,s,d,func) \ michael@0: SIMPLE_BILINEAR_A8_MASK_FAST_PATH_COVER (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NONE (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_A8_MASK_FAST_PATH_PAD (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NORMAL (op,s,d,func) michael@0: 
michael@0: #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH(op,s,d,func) \ michael@0: SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_COVER (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NONE (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_PAD (op,s,d,func), \ michael@0: SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NORMAL (op,s,d,func) michael@0: michael@0: #endif