Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | diff --git a/src/cairo-gl-surface.c b/src/cairo-gl-surface.c |
michael@0 | 2 | index 2acc8b5..019249e 100644 |
michael@0 | 3 | --- a/src/cairo-gl-surface.c |
michael@0 | 4 | +++ b/src/cairo-gl-surface.c |
michael@0 | 5 | @@ -2012,13 +2012,14 @@ typedef struct _cairo_gl_surface_span_renderer { |
michael@0 | 6 | |
michael@0 | 7 | cairo_gl_composite_setup_t setup; |
michael@0 | 8 | |
michael@0 | 9 | + int xmin, xmax; |
michael@0 | 10 | + |
michael@0 | 11 | cairo_operator_t op; |
michael@0 | 12 | cairo_antialias_t antialias; |
michael@0 | 13 | |
michael@0 | 14 | cairo_gl_surface_t *dst; |
michael@0 | 15 | cairo_region_t *clip; |
michael@0 | 16 | |
michael@0 | 17 | - cairo_composite_rectangles_t composite_rectangles; |
michael@0 | 18 | GLuint vbo; |
michael@0 | 19 | void *vbo_base; |
michael@0 | 20 | unsigned int vbo_size; |
michael@0 | 21 | @@ -2049,11 +2050,11 @@ _cairo_gl_span_renderer_flush (cairo_gl_surface_span_renderer_t *renderer) |
michael@0 | 22 | cairo_region_get_rectangle (renderer->clip, i, &rect); |
michael@0 | 23 | |
michael@0 | 24 | glScissor (rect.x, rect.y, rect.width, rect.height); |
michael@0 | 25 | - glDrawArrays (GL_LINES, 0, count); |
michael@0 | 26 | + glDrawArrays (GL_QUADS, 0, count); |
michael@0 | 27 | } |
michael@0 | 28 | glDisable (GL_SCISSOR_TEST); |
michael@0 | 29 | } else { |
michael@0 | 30 | - glDrawArrays (GL_LINES, 0, count); |
michael@0 | 31 | + glDrawArrays (GL_QUADS, 0, count); |
michael@0 | 32 | } |
michael@0 | 33 | } |
michael@0 | 34 | |
michael@0 | 35 | @@ -2134,72 +2135,87 @@ _cairo_gl_emit_span_vertex (cairo_gl_surface_span_renderer_t *renderer, |
michael@0 | 36 | |
michael@0 | 37 | static void |
michael@0 | 38 | _cairo_gl_emit_span (cairo_gl_surface_span_renderer_t *renderer, |
michael@0 | 39 | - int x1, int x2, int y, uint8_t alpha) |
michael@0 | 40 | + int x, int y1, int y2, |
michael@0 | 41 | + uint8_t alpha) |
michael@0 | 42 | { |
michael@0 | 43 | float *vertices = _cairo_gl_span_renderer_get_vbo (renderer, 2); |
michael@0 | 44 | |
michael@0 | 45 | - _cairo_gl_emit_span_vertex (renderer, x1, y, alpha, vertices); |
michael@0 | 46 | - _cairo_gl_emit_span_vertex (renderer, x2, y, alpha, |
michael@0 | 47 | + _cairo_gl_emit_span_vertex (renderer, x, y1, alpha, vertices); |
michael@0 | 48 | + _cairo_gl_emit_span_vertex (renderer, x, y2, alpha, |
michael@0 | 49 | vertices + renderer->vertex_size / 4); |
michael@0 | 50 | } |
michael@0 | 51 | |
michael@0 | 52 | -/* Emits the contents of the span renderer rows as GL_LINES with the span's |
michael@0 | 53 | - * alpha. |
michael@0 | 54 | - * |
michael@0 | 55 | - * Unlike the image surface, which is compositing into a temporary, we emit |
michael@0 | 56 | - * coverage even for alpha == 0, in case we're using an unbounded operator. |
michael@0 | 57 | - * But it means we avoid having to do the fixup. |
michael@0 | 58 | - */ |
michael@0 | 59 | +static void |
michael@0 | 60 | +_cairo_gl_emit_rectangle (cairo_gl_surface_span_renderer_t *renderer, |
michael@0 | 61 | + int x1, int y1, |
michael@0 | 62 | + int x2, int y2, |
michael@0 | 63 | + int coverage) |
michael@0 | 64 | +{ |
michael@0 | 65 | + _cairo_gl_emit_span (renderer, x1, y1, y2, coverage); |
michael@0 | 66 | + _cairo_gl_emit_span (renderer, x2, y2, y1, coverage); |
michael@0 | 67 | +} |
michael@0 | 68 | + |
michael@0 | 69 | static cairo_status_t |
michael@0 | 70 | -_cairo_gl_surface_span_renderer_render_row ( |
michael@0 | 71 | - void *abstract_renderer, |
michael@0 | 72 | - int y, |
michael@0 | 73 | - const cairo_half_open_span_t *spans, |
michael@0 | 74 | - unsigned num_spans) |
michael@0 | 75 | +_cairo_gl_render_bounded_spans (void *abstract_renderer, |
michael@0 | 76 | + int y, int height, |
michael@0 | 77 | + const cairo_half_open_span_t *spans, |
michael@0 | 78 | + unsigned num_spans) |
michael@0 | 79 | { |
michael@0 | 80 | cairo_gl_surface_span_renderer_t *renderer = abstract_renderer; |
michael@0 | 81 | - int xmin = renderer->composite_rectangles.mask.x; |
michael@0 | 82 | - int xmax = xmin + renderer->composite_rectangles.width; |
michael@0 | 83 | - int prev_x = xmin; |
michael@0 | 84 | - int prev_alpha = 0; |
michael@0 | 85 | - unsigned i; |
michael@0 | 86 | - int x_translate; |
michael@0 | 87 | - |
michael@0 | 88 | - /* Make sure we're within y-range. */ |
michael@0 | 89 | - if (y < renderer->composite_rectangles.mask.y || |
michael@0 | 90 | - y >= renderer->composite_rectangles.mask.y + |
michael@0 | 91 | - renderer->composite_rectangles.height) |
michael@0 | 92 | + |
michael@0 | 93 | + if (num_spans == 0) |
michael@0 | 94 | return CAIRO_STATUS_SUCCESS; |
michael@0 | 95 | |
michael@0 | 96 | - x_translate = renderer->composite_rectangles.dst.x - |
michael@0 | 97 | - renderer->composite_rectangles.mask.x; |
michael@0 | 98 | - y += renderer->composite_rectangles.dst.y - |
michael@0 | 99 | - renderer->composite_rectangles.mask.y; |
michael@0 | 100 | + do { |
michael@0 | 101 | + if (spans[0].coverage) { |
michael@0 | 102 | + _cairo_gl_emit_rectangle (renderer, |
michael@0 | 103 | + spans[0].x, y, |
michael@0 | 104 | + spans[1].x, y + height, |
michael@0 | 105 | + spans[0].coverage); |
michael@0 | 106 | + } |
michael@0 | 107 | |
michael@0 | 108 | - /* Find the first span within x-range. */ |
michael@0 | 109 | - for (i=0; i < num_spans && spans[i].x < xmin; i++) {} |
michael@0 | 110 | - if (i>0) |
michael@0 | 111 | - prev_alpha = spans[i-1].coverage; |
michael@0 | 112 | + spans++; |
michael@0 | 113 | + } while (--num_spans > 1); |
michael@0 | 114 | |
michael@0 | 115 | - /* Set the intermediate spans. */ |
michael@0 | 116 | - for (; i < num_spans; i++) { |
michael@0 | 117 | - int x = spans[i].x; |
michael@0 | 118 | + return CAIRO_STATUS_SUCCESS; |
michael@0 | 119 | +} |
michael@0 | 120 | |
michael@0 | 121 | - if (x >= xmax) |
michael@0 | 122 | - break; |
michael@0 | 123 | +static cairo_status_t |
michael@0 | 124 | +_cairo_gl_render_unbounded_spans (void *abstract_renderer, |
michael@0 | 125 | + int y, int height, |
michael@0 | 126 | + const cairo_half_open_span_t *spans, |
michael@0 | 127 | + unsigned num_spans) |
michael@0 | 128 | +{ |
michael@0 | 129 | + cairo_gl_surface_span_renderer_t *renderer = abstract_renderer; |
michael@0 | 130 | |
michael@0 | 131 | - _cairo_gl_emit_span (renderer, |
michael@0 | 132 | - prev_x + x_translate, x + x_translate, y, |
michael@0 | 133 | - prev_alpha); |
michael@0 | 134 | + if (num_spans == 0) { |
michael@0 | 135 | + _cairo_gl_emit_rectangle (renderer, |
michael@0 | 136 | + renderer->xmin, y, |
michael@0 | 137 | + renderer->xmax, y + height, |
michael@0 | 138 | + 0); |
michael@0 | 139 | + return CAIRO_STATUS_SUCCESS; |
michael@0 | 140 | + } |
michael@0 | 141 | |
michael@0 | 142 | - prev_x = x; |
michael@0 | 143 | - prev_alpha = spans[i].coverage; |
michael@0 | 144 | + if (spans[0].x != renderer->xmin) { |
michael@0 | 145 | + _cairo_gl_emit_rectangle (renderer, |
michael@0 | 146 | + renderer->xmin, y, |
michael@0 | 147 | + spans[0].x, y + height, |
michael@0 | 148 | + 0); |
michael@0 | 149 | } |
michael@0 | 150 | |
michael@0 | 151 | - if (prev_x < xmax) { |
michael@0 | 152 | - _cairo_gl_emit_span (renderer, |
michael@0 | 153 | - prev_x + x_translate, xmax + x_translate, y, |
michael@0 | 154 | - prev_alpha); |
michael@0 | 155 | + do { |
michael@0 | 156 | + _cairo_gl_emit_rectangle (renderer, |
michael@0 | 157 | + spans[0].x, y, |
michael@0 | 158 | + spans[1].x, y + height, |
michael@0 | 159 | + spans[0].coverage); |
michael@0 | 160 | + spans++; |
michael@0 | 161 | + } while (--num_spans > 1); |
michael@0 | 162 | + |
michael@0 | 163 | + if (spans[0].x != renderer->xmax) { |
michael@0 | 164 | + _cairo_gl_emit_rectangle (renderer, |
michael@0 | 165 | + spans[0].x, y, |
michael@0 | 166 | + renderer->xmax, y + height, |
michael@0 | 167 | + 0); |
michael@0 | 168 | } |
michael@0 | 169 | |
michael@0 | 170 | return CAIRO_STATUS_SUCCESS; |
michael@0 | 171 | @@ -2274,8 +2290,6 @@ _cairo_gl_surface_create_span_renderer (cairo_operator_t op, |
michael@0 | 172 | cairo_gl_surface_t *dst = abstract_dst; |
michael@0 | 173 | cairo_gl_surface_span_renderer_t *renderer; |
michael@0 | 174 | cairo_status_t status; |
michael@0 | 175 | - int width = rects->width; |
michael@0 | 176 | - int height = rects->height; |
michael@0 | 177 | cairo_surface_attributes_t *src_attributes; |
michael@0 | 178 | GLenum err; |
michael@0 | 179 | |
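
The cairo-gl-surface.c hunks above replace the per-scanline GL_LINES emission with filled rectangles drawn as GL_QUADS: each half-open span becomes a rectangle covering the full height of the row run being rendered, and the renderer keeps only the xmin/xmax it needs instead of the whole composite_rectangles. What follows is a minimal standalone sketch of the bounded-spans loop, not the cairo code itself; span_t and emit_rectangle() are simplified stand-ins for cairo_half_open_span_t and the pair of _cairo_gl_emit_span() calls that push four GL_QUADS vertices.

#include <stdint.h>

/* Simplified stand-ins for the cairo types and helpers. */
typedef struct { int x; uint8_t coverage; } span_t;

static void
emit_rectangle (int x1, int y1, int x2, int y2, int coverage)
{
    /* In the patch: two _cairo_gl_emit_span() calls, i.e. four
     * GL_QUADS vertices sharing a single coverage value. */
    (void) x1; (void) y1; (void) x2; (void) y2; (void) coverage;
}

/* Bounded operators leave uncovered destination pixels untouched, so
 * zero-coverage spans can simply be skipped.  Half-open spans arrive
 * with a terminating entry that closes the row, so num_spans is at
 * least 2 whenever it is non-zero. */
static void
render_bounded_spans (int y, int height,
                      const span_t *spans, unsigned num_spans)
{
    if (num_spans < 2)
        return;

    do {
        if (spans[0].coverage)
            emit_rectangle (spans[0].x, y,
                            spans[1].x, y + height,
                            spans[0].coverage);
        spans++;
    } while (--num_spans > 1);
}
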
michael@0 | 180 | diff --git a/src/cairo-image-surface.c b/src/cairo-image-surface.c |
michael@0 | 181 | index 48d8013..d52979d 100644 |
michael@0 | 182 | --- a/src/cairo-image-surface.c |
michael@0 | 183 | +++ b/src/cairo-image-surface.c |
michael@0 | 184 | @@ -1390,11 +1390,13 @@ typedef struct _cairo_image_surface_span_renderer { |
michael@0 | 185 | const cairo_pattern_t *pattern; |
michael@0 | 186 | cairo_antialias_t antialias; |
michael@0 | 187 | |
michael@0 | 188 | + uint8_t *mask_data; |
michael@0 | 189 | + uint32_t mask_stride; |
michael@0 | 190 | + |
michael@0 | 191 | cairo_image_surface_t *src; |
michael@0 | 192 | cairo_surface_attributes_t src_attributes; |
michael@0 | 193 | cairo_image_surface_t *mask; |
michael@0 | 194 | cairo_image_surface_t *dst; |
michael@0 | 195 | - |
michael@0 | 196 | cairo_composite_rectangles_t composite_rectangles; |
michael@0 | 197 | } cairo_image_surface_span_renderer_t; |
michael@0 | 198 | |
michael@0 | 199 | @@ -1403,66 +1405,46 @@ _cairo_image_surface_span_render_row ( |
michael@0 | 200 | int y, |
michael@0 | 201 | const cairo_half_open_span_t *spans, |
michael@0 | 202 | unsigned num_spans, |
michael@0 | 203 | - cairo_image_surface_t *mask, |
michael@0 | 204 | - const cairo_composite_rectangles_t *rects) |
michael@0 | 205 | + uint8_t *data, |
michael@0 | 206 | + uint32_t stride) |
michael@0 | 207 | { |
michael@0 | 208 | - int xmin = rects->mask.x; |
michael@0 | 209 | - int xmax = xmin + rects->width; |
michael@0 | 210 | uint8_t *row; |
michael@0 | 211 | - int prev_x = xmin; |
michael@0 | 212 | - int prev_alpha = 0; |
michael@0 | 213 | unsigned i; |
michael@0 | 214 | |
michael@0 | 215 | - /* Make sure we're within y-range. */ |
michael@0 | 216 | - y -= rects->mask.y; |
michael@0 | 217 | - if (y < 0 || y >= rects->height) |
michael@0 | 218 | + if (num_spans == 0) |
michael@0 | 219 | return; |
michael@0 | 220 | |
michael@0 | 221 | - row = (uint8_t*)(mask->data) + y*(size_t)mask->stride - xmin; |
michael@0 | 222 | - |
michael@0 | 223 | - /* Find the first span within x-range. */ |
michael@0 | 224 | - for (i=0; i < num_spans && spans[i].x < xmin; i++) {} |
michael@0 | 225 | - if (i>0) |
michael@0 | 226 | - prev_alpha = spans[i-1].coverage; |
michael@0 | 227 | - |
michael@0 | 228 | - /* Set the intermediate spans. */ |
michael@0 | 229 | - for (; i < num_spans; i++) { |
michael@0 | 230 | - int x = spans[i].x; |
michael@0 | 231 | - |
michael@0 | 232 | - if (x >= xmax) |
michael@0 | 233 | - break; |
michael@0 | 234 | - |
michael@0 | 235 | - if (prev_alpha != 0) { |
michael@0 | 236 | - /* We implement setting rendering the most common single |
michael@0 | 237 | - * pixel wide span case to avoid the overhead of a memset |
michael@0 | 238 | - * call. Open coding setting longer spans didn't show a |
michael@0 | 239 | - * noticeable improvement over memset. */ |
michael@0 | 240 | - if (x == prev_x + 1) { |
michael@0 | 241 | - row[prev_x] = prev_alpha; |
michael@0 | 242 | - } |
michael@0 | 243 | - else { |
michael@0 | 244 | - memset(row + prev_x, prev_alpha, x - prev_x); |
michael@0 | 245 | - } |
michael@0 | 246 | + row = data + y * stride; |
michael@0 | 247 | + for (i = 0; i < num_spans - 1; i++) { |
michael@0 | 248 | + if (! spans[i].coverage) |
michael@0 | 249 | + continue; |
michael@0 | 250 | + |
michael@0 | 251 | + /* We implement setting the most common single pixel wide |
michael@0 | 252 | + * span case to avoid the overhead of a memset call. |
michael@0 | 253 | + * Open coding setting longer spans didn't show a |
michael@0 | 254 | + * noticeable improvement over memset. |
michael@0 | 255 | + */ |
michael@0 | 256 | + if (spans[i+1].x == spans[i].x + 1) { |
michael@0 | 257 | + row[spans[i].x] = spans[i].coverage; |
michael@0 | 258 | + } else { |
michael@0 | 259 | + memset (row + spans[i].x, |
michael@0 | 260 | + spans[i].coverage, |
michael@0 | 261 | + spans[i+1].x - spans[i].x); |
michael@0 | 262 | } |
michael@0 | 263 | - |
michael@0 | 264 | - prev_x = x; |
michael@0 | 265 | - prev_alpha = spans[i].coverage; |
michael@0 | 266 | - } |
michael@0 | 267 | - |
michael@0 | 268 | - if (prev_alpha != 0 && prev_x < xmax) { |
michael@0 | 269 | - memset(row + prev_x, prev_alpha, xmax - prev_x); |
michael@0 | 270 | } |
michael@0 | 271 | } |
michael@0 | 272 | |
michael@0 | 273 | static cairo_status_t |
michael@0 | 274 | -_cairo_image_surface_span_renderer_render_row ( |
michael@0 | 275 | +_cairo_image_surface_span_renderer_render_rows ( |
michael@0 | 276 | void *abstract_renderer, |
michael@0 | 277 | int y, |
michael@0 | 278 | + int height, |
michael@0 | 279 | const cairo_half_open_span_t *spans, |
michael@0 | 280 | unsigned num_spans) |
michael@0 | 281 | { |
michael@0 | 282 | cairo_image_surface_span_renderer_t *renderer = abstract_renderer; |
michael@0 | 283 | - _cairo_image_surface_span_render_row (y, spans, num_spans, renderer->mask, &renderer->composite_rectangles); |
michael@0 | 284 | + while (height--) |
michael@0 | 285 | + _cairo_image_surface_span_render_row (y++, spans, num_spans, renderer->mask_data, renderer->mask_stride); |
michael@0 | 286 | return CAIRO_STATUS_SUCCESS; |
michael@0 | 287 | } |
michael@0 | 288 | |
michael@0 | 289 | @@ -1517,11 +1499,11 @@ _cairo_image_surface_span_renderer_finish (void *abstract_renderer) |
michael@0 | 290 | &dst->base, |
michael@0 | 291 | src_attributes, |
michael@0 | 292 | src->width, src->height, |
michael@0 | 293 | - rects->width, rects->height, |
michael@0 | 294 | + width, height, |
michael@0 | 295 | rects->src.x, rects->src.y, |
michael@0 | 296 | 0, 0, /* mask.x, mask.y */ |
michael@0 | 297 | rects->dst.x, rects->dst.y, |
michael@0 | 298 | - rects->width, rects->height, |
michael@0 | 299 | + width, height, |
michael@0 | 300 | dst->clip_region); |
michael@0 | 301 | } |
michael@0 | 302 | } |
michael@0 | 303 | @@ -1567,7 +1549,7 @@ _cairo_image_surface_create_span_renderer (cairo_operator_t op, |
michael@0 | 304 | |
michael@0 | 305 | renderer->base.destroy = _cairo_image_surface_span_renderer_destroy; |
michael@0 | 306 | renderer->base.finish = _cairo_image_surface_span_renderer_finish; |
michael@0 | 307 | - renderer->base.render_row = _cairo_image_surface_span_renderer_render_row; |
michael@0 | 308 | + renderer->base.render_rows = _cairo_image_surface_span_renderer_render_rows; |
michael@0 | 309 | renderer->op = op; |
michael@0 | 310 | renderer->pattern = pattern; |
michael@0 | 311 | renderer->antialias = antialias; |
michael@0 | 312 | @@ -1604,6 +1586,9 @@ _cairo_image_surface_create_span_renderer (cairo_operator_t op, |
michael@0 | 313 | _cairo_image_surface_span_renderer_destroy (renderer); |
michael@0 | 314 | return _cairo_span_renderer_create_in_error (status); |
michael@0 | 315 | } |
michael@0 | 316 | + |
michael@0 | 317 | + renderer->mask_data = renderer->mask->data - rects->mask.x - rects->mask.y * renderer->mask->stride; |
michael@0 | 318 | + renderer->mask_stride = renderer->mask->stride; |
michael@0 | 319 | return &renderer->base; |
michael@0 | 320 | } |
michael@0 | 321 | |
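
The cairo-image-surface.c hunks drop the per-row clipping and coordinate translation: the renderer stores a mask pointer pre-biased by the composite mask offsets at creation time (mask_data = mask->data - rects->mask.x - rects->mask.y * stride), so render_rows can address rows with the incoming y and span x values directly and simply repeat the same span list for every row of a tall span. Below is a rough standalone illustration of that pointer bias, with hypothetical names, assuming an A8 (one byte per pixel) mask as in the patch.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct { int x; uint8_t coverage; } span_t;

typedef struct {
    uint8_t *data;    /* pre-biased base pointer */
    int      stride;  /* bytes per mask row */
} a8_mask_t;

/* Bias the base pointer once so that composite-space (x, y) can be
 * used as-is; this mirrors the renderer->mask_data setup above. */
static void
a8_mask_init (a8_mask_t *m, uint8_t *pixels, int stride,
              int mask_x, int mask_y)
{
    m->stride = stride;
    m->data   = pixels - mask_x - (ptrdiff_t) mask_y * stride;
}

/* Fill rows y .. y+height-1: the coverage of spans[i] applies to the
 * half-open pixel range [spans[i].x, spans[i+1].x). */
static void
a8_mask_render_rows (a8_mask_t *m, int y, int height,
                     const span_t *spans, unsigned num_spans)
{
    while (height--) {
        uint8_t *row = m->data + (ptrdiff_t) y * m->stride;
        unsigned i;

        for (i = 0; i + 1 < num_spans; i++) {
            if (spans[i].coverage)
                memset (row + spans[i].x, spans[i].coverage,
                        spans[i + 1].x - spans[i].x);
        }
        y++;
    }
}
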
michael@0 | 322 | diff --git a/src/cairo-spans-private.h b/src/cairo-spans-private.h |
michael@0 | 323 | index e29a567..af3b38c 100644 |
michael@0 | 324 | --- a/src/cairo-spans-private.h |
michael@0 | 325 | +++ b/src/cairo-spans-private.h |
michael@0 | 326 | @@ -47,26 +47,24 @@ typedef struct _cairo_half_open_span { |
michael@0 | 327 | * surfaces if they want to composite spans instead of trapezoids. */ |
michael@0 | 328 | typedef struct _cairo_span_renderer cairo_span_renderer_t; |
michael@0 | 329 | struct _cairo_span_renderer { |
michael@0 | 330 | + /* Private status variable. */ |
michael@0 | 331 | + cairo_status_t status; |
michael@0 | 332 | + |
michael@0 | 333 | /* Called to destroy the renderer. */ |
michael@0 | 334 | cairo_destroy_func_t destroy; |
michael@0 | 335 | |
michael@0 | 336 | - /* Render the spans on row y of the source by whatever compositing |
michael@0 | 337 | - * method is required. The function should ignore spans outside |
michael@0 | 338 | - * the bounding box set by the init() function. */ |
michael@0 | 339 | - cairo_status_t (*render_row)( |
michael@0 | 340 | - void *abstract_renderer, |
michael@0 | 341 | - int y, |
michael@0 | 342 | - const cairo_half_open_span_t *coverages, |
michael@0 | 343 | - unsigned num_coverages); |
michael@0 | 344 | + /* Render the spans on rows y .. y+height-1 of the destination by |
michael@0 | 345 | + * whatever compositing method is required. */ |
michael@0 | 346 | + cairo_warn cairo_status_t |
michael@0 | 347 | + (*render_rows) (void *abstract_renderer, |
michael@0 | 348 | + int y, int height, |
michael@0 | 349 | + const cairo_half_open_span_t *coverages, |
michael@0 | 350 | + unsigned num_coverages); |
michael@0 | 351 | |
michael@0 | 352 | /* Called after all rows have been rendered to perform whatever |
michael@0 | 353 | * final rendering step is required. This function is called just |
michael@0 | 354 | * once before the renderer is destroyed. */ |
michael@0 | 355 | - cairo_status_t (*finish)( |
michael@0 | 356 | - void *abstract_renderer); |
michael@0 | 357 | - |
michael@0 | 358 | - /* Private status variable. */ |
michael@0 | 359 | - cairo_status_t status; |
michael@0 | 360 | + cairo_status_t (*finish) (void *abstract_renderer); |
michael@0 | 361 | }; |
michael@0 | 362 | |
michael@0 | 363 | /* Scan converter interface. */ |
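
The cairo-spans-private.h change is the core interface change of this patch: the per-row render_row() callback becomes render_rows(), which takes an extra height argument and must apply the same half-open span list to height consecutive rows, so a scan converter can flush a whole run of identical rows with one call. A minimal sketch of that contract, using simplified stand-in types (the real ones are cairo_half_open_span_t and cairo_span_renderer_t):

#include <stdint.h>

typedef struct { int x; uint8_t coverage; } half_open_span_t;

typedef struct span_renderer {
    int status;
    /* Apply the same span list to rows y, y+1, ..., y+height-1. */
    int (*render_rows) (void *abstract_renderer,
                        int y, int height,
                        const half_open_span_t *spans,
                        unsigned num_spans);
} span_renderer_t;

/* Caller side: a run of rows sharing the same coverage is now flushed
 * with a single callback instead of one call per row. */
static int
flush_identical_rows (span_renderer_t *r, int y0, int y1,
                      const half_open_span_t *spans, unsigned num_spans)
{
    return r->render_rows (r, y0, y1 - y0, spans, num_spans);
}
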
michael@0 | 364 | diff --git a/src/cairo-spans.c b/src/cairo-spans.c |
michael@0 | 365 | index af3b85f..69894c1 100644 |
michael@0 | 366 | --- a/src/cairo-spans.c |
michael@0 | 367 | +++ b/src/cairo-spans.c |
michael@0 | 368 | @@ -275,13 +275,15 @@ _cairo_scan_converter_create_in_error (cairo_status_t status) |
michael@0 | 369 | } |
michael@0 | 370 | |
michael@0 | 371 | static cairo_status_t |
michael@0 | 372 | -_cairo_nil_span_renderer_render_row ( |
michael@0 | 373 | +_cairo_nil_span_renderer_render_rows ( |
michael@0 | 374 | void *abstract_renderer, |
michael@0 | 375 | int y, |
michael@0 | 376 | + int height, |
michael@0 | 377 | const cairo_half_open_span_t *coverages, |
michael@0 | 378 | unsigned num_coverages) |
michael@0 | 379 | { |
michael@0 | 380 | (void) y; |
michael@0 | 381 | + (void) height; |
michael@0 | 382 | (void) coverages; |
michael@0 | 383 | (void) num_coverages; |
michael@0 | 384 | return _cairo_span_renderer_status (abstract_renderer); |
michael@0 | 385 | @@ -310,7 +312,7 @@ _cairo_span_renderer_set_error ( |
michael@0 | 386 | ASSERT_NOT_REACHED; |
michael@0 | 387 | } |
michael@0 | 388 | if (renderer->status == CAIRO_STATUS_SUCCESS) { |
michael@0 | 389 | - renderer->render_row = _cairo_nil_span_renderer_render_row; |
michael@0 | 390 | + renderer->render_rows = _cairo_nil_span_renderer_render_rows; |
michael@0 | 391 | renderer->finish = _cairo_nil_span_renderer_finish; |
michael@0 | 392 | renderer->status = error; |
michael@0 | 393 | } |
michael@0 | 394 | diff --git a/src/cairo-tor-scan-converter.c b/src/cairo-tor-scan-converter.c |
michael@0 | 395 | index 29262c2..2b9fb1b 100644 |
michael@0 | 396 | --- a/src/cairo-tor-scan-converter.c |
michael@0 | 397 | +++ b/src/cairo-tor-scan-converter.c |
michael@0 | 398 | @@ -128,27 +128,29 @@ blit_with_span_renderer( |
michael@0 | 399 | cairo_span_renderer_t *span_renderer, |
michael@0 | 400 | struct pool *span_pool, |
michael@0 | 401 | int y, |
michael@0 | 402 | + int height, |
michael@0 | 403 | int xmin, |
michael@0 | 404 | int xmax); |
michael@0 | 405 | |
michael@0 | 406 | static glitter_status_t |
michael@0 | 407 | -blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y); |
michael@0 | 408 | +blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y, int height); |
michael@0 | 409 | |
michael@0 | 410 | #define GLITTER_BLIT_COVERAGES_ARGS \ |
michael@0 | 411 | cairo_span_renderer_t *span_renderer, \ |
michael@0 | 412 | struct pool *span_pool |
michael@0 | 413 | |
michael@0 | 414 | -#define GLITTER_BLIT_COVERAGES(cells, y, xmin, xmax) do { \ |
michael@0 | 415 | +#define GLITTER_BLIT_COVERAGES(cells, y, height, xmin, xmax) do { \ |
michael@0 | 416 | cairo_status_t status = blit_with_span_renderer (cells, \ |
michael@0 | 417 | span_renderer, \ |
michael@0 | 418 | span_pool, \ |
michael@0 | 419 | - y, xmin, xmax); \ |
michael@0 | 420 | + y, height, \ |
michael@0 | 421 | + xmin, xmax); \ |
michael@0 | 422 | if (unlikely (status)) \ |
michael@0 | 423 | return status; \ |
michael@0 | 424 | } while (0) |
michael@0 | 425 | |
michael@0 | 426 | -#define GLITTER_BLIT_COVERAGES_EMPTY(y, xmin, xmax) do { \ |
michael@0 | 427 | - cairo_status_t status = blit_empty_with_span_renderer (span_renderer, y); \ |
michael@0 | 428 | +#define GLITTER_BLIT_COVERAGES_EMPTY(y, height, xmin, xmax) do { \ |
michael@0 | 429 | + cairo_status_t status = blit_empty_with_span_renderer (span_renderer, y, height); \ |
michael@0 | 430 | if (unlikely (status)) \ |
michael@0 | 431 | return status; \ |
michael@0 | 432 | } while (0) |
michael@0 | 433 | @@ -309,8 +311,8 @@ typedef int grid_area_t; |
michael@0 | 434 | #define UNROLL3(x) x x x |
michael@0 | 435 | |
michael@0 | 436 | struct quorem { |
michael@0 | 437 | - int quo; |
michael@0 | 438 | - int rem; |
michael@0 | 439 | + int32_t quo; |
michael@0 | 440 | + int32_t rem; |
michael@0 | 441 | }; |
michael@0 | 442 | |
michael@0 | 443 | /* Header for a chunk of memory in a memory pool. */ |
michael@0 | 444 | @@ -382,6 +384,7 @@ struct edge { |
michael@0 | 445 | /* Original sign of the edge: +1 for downwards, -1 for upwards |
michael@0 | 446 | * edges. */ |
michael@0 | 447 | int dir; |
michael@0 | 448 | + int vertical; |
michael@0 | 449 | }; |
michael@0 | 450 | |
michael@0 | 451 | /* Number of subsample rows per y-bucket. Must be GRID_Y. */ |
michael@0 | 452 | @@ -389,18 +392,28 @@ struct edge { |
michael@0 | 453 | |
michael@0 | 454 | #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/EDGE_Y_BUCKET_HEIGHT) |
michael@0 | 455 | |
michael@0 | 456 | +struct bucket { |
michael@0 | 457 | + /* Unsorted list of edges starting within this bucket. */ |
michael@0 | 458 | + struct edge *edges; |
michael@0 | 459 | + |
michael@0 | 460 | + /* Set to non-zero if there are edges starting strictly within the |
michael@0 | 461 | + * bucket. */ |
michael@0 | 462 | + unsigned have_inside_edges; |
michael@0 | 463 | +}; |
michael@0 | 464 | + |
michael@0 | 465 | /* A collection of sorted and vertically clipped edges of the polygon. |
michael@0 | 466 | * Edges are moved from the polygon to an active list while scan |
michael@0 | 467 | * converting. */ |
michael@0 | 468 | struct polygon { |
michael@0 | 469 | - /* The vertical clip extents. */ |
michael@0 | 470 | + /* The clip extents. */ |
michael@0 | 471 | + grid_scaled_x_t xmin, xmax; |
michael@0 | 472 | grid_scaled_y_t ymin, ymax; |
michael@0 | 473 | |
michael@0 | 474 | /* Array of edges all starting in the same bucket. An edge is put |
michael@0 | 475 | * into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when |
michael@0 | 476 | * it is added to the polygon. */ |
michael@0 | 477 | - struct edge **y_buckets; |
michael@0 | 478 | - struct edge *y_buckets_embedded[64]; |
michael@0 | 479 | + struct bucket *y_buckets; |
michael@0 | 480 | + struct bucket y_buckets_embedded[64]; |
michael@0 | 481 | |
michael@0 | 482 | struct { |
michael@0 | 483 | struct pool base[1]; |
michael@0 | 484 | @@ -702,7 +715,6 @@ static void |
michael@0 | 485 | cell_list_fini(struct cell_list *cells) |
michael@0 | 486 | { |
michael@0 | 487 | pool_fini (cells->cell_pool.base); |
michael@0 | 488 | - cell_list_init (cells); |
michael@0 | 489 | } |
michael@0 | 490 | |
michael@0 | 491 | /* Empty the cell list. This is called at the start of every pixel |
michael@0 | 492 | @@ -715,6 +727,26 @@ cell_list_reset (struct cell_list *cells) |
michael@0 | 493 | pool_reset (cells->cell_pool.base); |
michael@0 | 494 | } |
michael@0 | 495 | |
michael@0 | 496 | +static struct cell * |
michael@0 | 497 | +cell_list_alloc (struct cell_list *cells, |
michael@0 | 498 | + struct cell **cursor, |
michael@0 | 499 | + struct cell *tail, |
michael@0 | 500 | + int x) |
michael@0 | 501 | +{ |
michael@0 | 502 | + struct cell *cell; |
michael@0 | 503 | + |
michael@0 | 504 | + cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell)); |
michael@0 | 505 | + if (unlikely (NULL == cell)) |
michael@0 | 506 | + return NULL; |
michael@0 | 507 | + |
michael@0 | 508 | + *cursor = cell; |
michael@0 | 509 | + cell->next = tail; |
michael@0 | 510 | + cell->x = x; |
michael@0 | 511 | + cell->uncovered_area = 0; |
michael@0 | 512 | + cell->covered_height = 0; |
michael@0 | 513 | + return cell; |
michael@0 | 514 | +} |
michael@0 | 515 | + |
michael@0 | 516 | /* Find a cell at the given x-coordinate. Returns %NULL if a new cell |
michael@0 | 517 | * needed to be allocated but couldn't be. Cells must be found with |
michael@0 | 518 | * non-decreasing x-coordinate until the cell list is rewound using |
michael@0 | 519 | @@ -737,22 +769,10 @@ cell_list_find (struct cell_list *cells, int x) |
michael@0 | 520 | } |
michael@0 | 521 | cells->cursor = cursor; |
michael@0 | 522 | |
michael@0 | 523 | - if (tail->x == x) { |
michael@0 | 524 | + if (tail->x == x) |
michael@0 | 525 | return tail; |
michael@0 | 526 | - } else { |
michael@0 | 527 | - struct cell *cell; |
michael@0 | 528 | - |
michael@0 | 529 | - cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell)); |
michael@0 | 530 | - if (unlikely (NULL == cell)) |
michael@0 | 531 | - return NULL; |
michael@0 | 532 | |
michael@0 | 533 | - *cursor = cell; |
michael@0 | 534 | - cell->next = tail; |
michael@0 | 535 | - cell->x = x; |
michael@0 | 536 | - cell->uncovered_area = 0; |
michael@0 | 537 | - cell->covered_height = 0; |
michael@0 | 538 | - return cell; |
michael@0 | 539 | - } |
michael@0 | 540 | + return cell_list_alloc (cells, cursor, tail, x); |
michael@0 | 541 | } |
michael@0 | 542 | |
michael@0 | 543 | /* Find two cells at x1 and x2. This is exactly equivalent |
michael@0 | 544 | @@ -832,9 +852,8 @@ cell_list_find_pair(struct cell_list *cells, int x1, int x2) |
michael@0 | 545 | /* Add an unbounded subpixel span covering subpixels >= x to the |
michael@0 | 546 | * coverage cells. */ |
michael@0 | 547 | static glitter_status_t |
michael@0 | 548 | -cell_list_add_unbounded_subspan( |
michael@0 | 549 | - struct cell_list *cells, |
michael@0 | 550 | - grid_scaled_x_t x) |
michael@0 | 551 | +cell_list_add_unbounded_subspan (struct cell_list *cells, |
michael@0 | 552 | + grid_scaled_x_t x) |
michael@0 | 553 | { |
michael@0 | 554 | struct cell *cell; |
michael@0 | 555 | int ix, fx; |
michael@0 | 556 | @@ -907,20 +926,24 @@ cell_list_render_edge( |
michael@0 | 557 | struct edge *edge, |
michael@0 | 558 | int sign) |
michael@0 | 559 | { |
michael@0 | 560 | - struct quorem x1 = edge->x; |
michael@0 | 561 | - struct quorem x2 = x1; |
michael@0 | 562 | grid_scaled_y_t y1, y2, dy; |
michael@0 | 563 | grid_scaled_x_t dx; |
michael@0 | 564 | int ix1, ix2; |
michael@0 | 565 | grid_scaled_x_t fx1, fx2; |
michael@0 | 566 | |
michael@0 | 567 | - x2.quo += edge->dxdy_full.quo; |
michael@0 | 568 | - x2.rem += edge->dxdy_full.rem; |
michael@0 | 569 | - if (x2.rem >= 0) { |
michael@0 | 570 | - ++x2.quo; |
michael@0 | 571 | - x2.rem -= edge->dy; |
michael@0 | 572 | + struct quorem x1 = edge->x; |
michael@0 | 573 | + struct quorem x2 = x1; |
michael@0 | 574 | + |
michael@0 | 575 | + if (! edge->vertical) { |
michael@0 | 576 | + x2.quo += edge->dxdy_full.quo; |
michael@0 | 577 | + x2.rem += edge->dxdy_full.rem; |
michael@0 | 578 | + if (x2.rem >= 0) { |
michael@0 | 579 | + ++x2.quo; |
michael@0 | 580 | + x2.rem -= edge->dy; |
michael@0 | 581 | + } |
michael@0 | 582 | + |
michael@0 | 583 | + edge->x = x2; |
michael@0 | 584 | } |
michael@0 | 585 | - edge->x = x2; |
michael@0 | 586 | |
michael@0 | 587 | GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1); |
michael@0 | 588 | GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2); |
michael@0 | 589 | @@ -1026,6 +1049,7 @@ static void |
michael@0 | 590 | polygon_init (struct polygon *polygon) |
michael@0 | 591 | { |
michael@0 | 592 | polygon->ymin = polygon->ymax = 0; |
michael@0 | 593 | + polygon->xmin = polygon->xmax = 0; |
michael@0 | 594 | polygon->y_buckets = polygon->y_buckets_embedded; |
michael@0 | 595 | pool_init (polygon->edge_pool.base, |
michael@0 | 596 | 8192 - sizeof (struct _pool_chunk), |
michael@0 | 597 | @@ -1045,10 +1069,11 @@ polygon_fini (struct polygon *polygon) |
michael@0 | 598 | * receive new edges and clip them to the vertical range |
michael@0 | 599 | * [ymin,ymax). */ |
michael@0 | 600 | static glitter_status_t |
michael@0 | 601 | -polygon_reset( |
michael@0 | 602 | - struct polygon *polygon, |
michael@0 | 603 | - grid_scaled_y_t ymin, |
michael@0 | 604 | - grid_scaled_y_t ymax) |
michael@0 | 605 | +polygon_reset (struct polygon *polygon, |
michael@0 | 606 | + grid_scaled_x_t xmin, |
michael@0 | 607 | + grid_scaled_x_t xmax, |
michael@0 | 608 | + grid_scaled_y_t ymin, |
michael@0 | 609 | + grid_scaled_y_t ymax) |
michael@0 | 610 | { |
michael@0 | 611 | unsigned h = ymax - ymin; |
michael@0 | 612 | unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + EDGE_Y_BUCKET_HEIGHT-1, |
michael@0 | 613 | @@ -1065,14 +1090,16 @@ polygon_reset( |
michael@0 | 614 | polygon->y_buckets = polygon->y_buckets_embedded; |
michael@0 | 615 | if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) { |
michael@0 | 616 | polygon->y_buckets = _cairo_malloc_ab (num_buckets, |
michael@0 | 617 | - sizeof (struct edge *)); |
michael@0 | 618 | + sizeof (struct bucket)); |
michael@0 | 619 | if (unlikely (NULL == polygon->y_buckets)) |
michael@0 | 620 | goto bail_no_mem; |
michael@0 | 621 | } |
michael@0 | 622 | - memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *)); |
michael@0 | 623 | + memset (polygon->y_buckets, 0, num_buckets * sizeof (struct bucket)); |
michael@0 | 624 | |
michael@0 | 625 | polygon->ymin = ymin; |
michael@0 | 626 | polygon->ymax = ymax; |
michael@0 | 627 | + polygon->xmin = xmin; |
michael@0 | 628 | + polygon->xmax = xmax; |
michael@0 | 629 | return GLITTER_STATUS_SUCCESS; |
michael@0 | 630 | |
michael@0 | 631 | bail_no_mem: |
michael@0 | 632 | @@ -1086,10 +1113,13 @@ _polygon_insert_edge_into_its_y_bucket( |
michael@0 | 633 | struct polygon *polygon, |
michael@0 | 634 | struct edge *e) |
michael@0 | 635 | { |
michael@0 | 636 | - unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin); |
michael@0 | 637 | - struct edge **ptail = &polygon->y_buckets[ix]; |
michael@0 | 638 | + unsigned j = e->ytop - polygon->ymin; |
michael@0 | 639 | + unsigned ix = j / EDGE_Y_BUCKET_HEIGHT; |
michael@0 | 640 | + unsigned offset = j % EDGE_Y_BUCKET_HEIGHT; |
michael@0 | 641 | + struct edge **ptail = &polygon->y_buckets[ix].edges; |
michael@0 | 642 | e->next = *ptail; |
michael@0 | 643 | *ptail = e; |
michael@0 | 644 | + polygon->y_buckets[ix].have_inside_edges |= offset; |
michael@0 | 645 | } |
michael@0 | 646 | |
michael@0 | 647 | inline static glitter_status_t |
michael@0 | 648 | @@ -1115,30 +1145,53 @@ polygon_add_edge (struct polygon *polygon, |
michael@0 | 649 | dx = edge->line.p2.x - edge->line.p1.x; |
michael@0 | 650 | dy = edge->line.p2.y - edge->line.p1.y; |
michael@0 | 651 | e->dy = dy; |
michael@0 | 652 | - e->dxdy = floored_divrem (dx, dy); |
michael@0 | 653 | - |
michael@0 | 654 | - if (ymin <= edge->top) |
michael@0 | 655 | - ytop = edge->top; |
michael@0 | 656 | - else |
michael@0 | 657 | - ytop = ymin; |
michael@0 | 658 | - if (ytop == edge->line.p1.y) { |
michael@0 | 659 | - e->x.quo = edge->line.p1.x; |
michael@0 | 660 | - e->x.rem = 0; |
michael@0 | 661 | - } else { |
michael@0 | 662 | - e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy); |
michael@0 | 663 | - e->x.quo += edge->line.p1.x; |
michael@0 | 664 | - } |
michael@0 | 665 | - |
michael@0 | 666 | e->dir = edge->dir; |
michael@0 | 667 | + |
michael@0 | 668 | + ytop = edge->top >= ymin ? edge->top : ymin; |
michael@0 | 669 | + ybot = edge->bottom <= ymax ? edge->bottom : ymax; |
michael@0 | 670 | e->ytop = ytop; |
michael@0 | 671 | - ybot = edge->bottom < ymax ? edge->bottom : ymax; |
michael@0 | 672 | e->height_left = ybot - ytop; |
michael@0 | 673 | |
michael@0 | 674 | - if (e->height_left >= GRID_Y) { |
michael@0 | 675 | - e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy); |
michael@0 | 676 | - } else { |
michael@0 | 677 | + if (dx == 0) { |
michael@0 | 678 | + e->vertical = TRUE; |
michael@0 | 679 | + e->x.quo = edge->line.p1.x; |
michael@0 | 680 | + e->x.rem = 0; |
michael@0 | 681 | + e->dxdy.quo = 0; |
michael@0 | 682 | + e->dxdy.rem = 0; |
michael@0 | 683 | e->dxdy_full.quo = 0; |
michael@0 | 684 | e->dxdy_full.rem = 0; |
michael@0 | 685 | + |
michael@0 | 686 | + /* Drop edges to the right of the clip extents. */ |
michael@0 | 687 | + if (e->x.quo >= polygon->xmax) |
michael@0 | 688 | + return GLITTER_STATUS_SUCCESS; |
michael@0 | 689 | + |
michael@0 | 690 | + /* Offset vertical edges at the left side of the clip extents |
michael@0 | 691 | + * to just shy of the left side. We depend on this when |
michael@0 | 692 | + * checking for possible intersections within the clip |
michael@0 | 693 | + * rectangle. */ |
michael@0 | 694 | + if (e->x.quo <= polygon->xmin) { |
michael@0 | 695 | + e->x.quo = polygon->xmin - 1; |
michael@0 | 696 | + } |
michael@0 | 697 | + } else { |
michael@0 | 698 | + e->vertical = FALSE; |
michael@0 | 699 | + e->dxdy = floored_divrem (dx, dy); |
michael@0 | 700 | + if (ytop == edge->line.p1.y) { |
michael@0 | 701 | + e->x.quo = edge->line.p1.x; |
michael@0 | 702 | + e->x.rem = 0; |
michael@0 | 703 | + } else { |
michael@0 | 704 | + e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy); |
michael@0 | 705 | + e->x.quo += edge->line.p1.x; |
michael@0 | 706 | + } |
michael@0 | 707 | + |
michael@0 | 708 | + if (e->x.quo >= polygon->xmax && e->dxdy.quo >= 0) |
michael@0 | 709 | + return GLITTER_STATUS_SUCCESS; |
michael@0 | 710 | + |
michael@0 | 711 | + if (e->height_left >= GRID_Y) { |
michael@0 | 712 | + e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy); |
michael@0 | 713 | + } else { |
michael@0 | 714 | + e->dxdy_full.quo = 0; |
michael@0 | 715 | + e->dxdy_full.rem = 0; |
michael@0 | 716 | + } |
michael@0 | 717 | } |
michael@0 | 718 | |
michael@0 | 719 | _polygon_insert_edge_into_its_y_bucket (polygon, e); |
michael@0 | 720 | @@ -1161,31 +1214,30 @@ active_list_init(struct active_list *active) |
michael@0 | 721 | active_list_reset(active); |
michael@0 | 722 | } |
michael@0 | 723 | |
michael@0 | 724 | -static void |
michael@0 | 725 | -active_list_fini( |
michael@0 | 726 | - struct active_list *active) |
michael@0 | 727 | -{ |
michael@0 | 728 | - active_list_reset(active); |
michael@0 | 729 | -} |
michael@0 | 730 | - |
michael@0 | 731 | /* Merge the edges in an unsorted list of edges into a sorted |
michael@0 | 732 | * list. The sort order is edges ascending by edge->x.quo. Returns |
michael@0 | 733 | * the new head of the sorted list. */ |
michael@0 | 734 | static struct edge * |
michael@0 | 735 | merge_unsorted_edges(struct edge *sorted_head, struct edge *unsorted_head) |
michael@0 | 736 | { |
michael@0 | 737 | - struct edge *head = unsorted_head; |
michael@0 | 738 | struct edge **cursor = &sorted_head; |
michael@0 | 739 | int x; |
michael@0 | 740 | |
michael@0 | 741 | - while (NULL != head) { |
michael@0 | 742 | + if (sorted_head == NULL) { |
michael@0 | 743 | + sorted_head = unsorted_head; |
michael@0 | 744 | + unsorted_head = unsorted_head->next; |
michael@0 | 745 | + sorted_head->next = NULL; |
michael@0 | 746 | + if (unsorted_head == NULL) |
michael@0 | 747 | + return sorted_head; |
michael@0 | 748 | + } |
michael@0 | 749 | + |
michael@0 | 750 | + do { |
michael@0 | 751 | + struct edge *next = unsorted_head->next; |
michael@0 | 752 | struct edge *prev = *cursor; |
michael@0 | 753 | - struct edge *next = head->next; |
michael@0 | 754 | - x = head->x.quo; |
michael@0 | 755 | |
michael@0 | 756 | - if (NULL == prev || x < prev->x.quo) { |
michael@0 | 757 | + x = unsorted_head->x.quo; |
michael@0 | 758 | + if (x < prev->x.quo) |
michael@0 | 759 | cursor = &sorted_head; |
michael@0 | 760 | - } |
michael@0 | 761 | |
michael@0 | 762 | while (1) { |
michael@0 | 763 | UNROLL3({ |
michael@0 | 764 | @@ -1196,26 +1248,29 @@ merge_unsorted_edges(struct edge *sorted_head, struct edge *unsorted_head) |
michael@0 | 765 | }); |
michael@0 | 766 | } |
michael@0 | 767 | |
michael@0 | 768 | - head->next = *cursor; |
michael@0 | 769 | - *cursor = head; |
michael@0 | 770 | + unsorted_head->next = *cursor; |
michael@0 | 771 | + *cursor = unsorted_head; |
michael@0 | 772 | + unsorted_head = next; |
michael@0 | 773 | + } while (unsorted_head != NULL); |
michael@0 | 774 | |
michael@0 | 775 | - head = next; |
michael@0 | 776 | - } |
michael@0 | 777 | return sorted_head; |
michael@0 | 778 | } |
michael@0 | 779 | |
michael@0 | 780 | /* Test if the edges on the active list can be safely advanced by a |
michael@0 | 781 | * full row without intersections or any edges ending. */ |
michael@0 | 782 | inline static int |
michael@0 | 783 | -active_list_can_step_full_row( |
michael@0 | 784 | - struct active_list *active) |
michael@0 | 785 | +active_list_can_step_full_row (struct active_list *active, |
michael@0 | 786 | + grid_scaled_x_t xmin) |
michael@0 | 787 | { |
michael@0 | 788 | + const struct edge *e; |
michael@0 | 789 | + grid_scaled_x_t prev_x = INT_MIN; |
michael@0 | 790 | + |
michael@0 | 791 | /* Recomputes the minimum height of all edges on the active |
michael@0 | 792 | * list if we have been dropping edges. */ |
michael@0 | 793 | if (active->min_height <= 0) { |
michael@0 | 794 | - struct edge *e = active->head; |
michael@0 | 795 | int min_height = INT_MAX; |
michael@0 | 796 | |
michael@0 | 797 | + e = active->head; |
michael@0 | 798 | while (NULL != e) { |
michael@0 | 799 | if (e->height_left < min_height) |
michael@0 | 800 | min_height = e->height_left; |
michael@0 | 801 | @@ -1225,27 +1280,38 @@ active_list_can_step_full_row( |
michael@0 | 802 | active->min_height = min_height; |
michael@0 | 803 | } |
michael@0 | 804 | |
michael@0 | 805 | - /* Check for intersections only if no edges end during the next |
michael@0 | 806 | - * row. */ |
michael@0 | 807 | - if (active->min_height >= GRID_Y) { |
michael@0 | 808 | - grid_scaled_x_t prev_x = INT_MIN; |
michael@0 | 809 | - struct edge *e = active->head; |
michael@0 | 810 | - while (NULL != e) { |
michael@0 | 811 | - struct quorem x = e->x; |
michael@0 | 812 | + if (active->min_height < GRID_Y) |
michael@0 | 813 | + return 0; |
michael@0 | 814 | |
michael@0 | 815 | + /* Check for intersections as no edges end during the next row. */ |
michael@0 | 816 | + e = active->head; |
michael@0 | 817 | + while (NULL != e) { |
michael@0 | 818 | + struct quorem x = e->x; |
michael@0 | 819 | + |
michael@0 | 820 | + if (! e->vertical) { |
michael@0 | 821 | x.quo += e->dxdy_full.quo; |
michael@0 | 822 | x.rem += e->dxdy_full.rem; |
michael@0 | 823 | if (x.rem >= 0) |
michael@0 | 824 | ++x.quo; |
michael@0 | 825 | + } |
michael@0 | 826 | |
michael@0 | 827 | - if (x.quo <= prev_x) |
michael@0 | 828 | + /* There may be an intersection if the edge sort order might |
michael@0 | 829 | + * change. */ |
michael@0 | 830 | + if (x.quo <= prev_x) { |
michael@0 | 831 | + /* Ignore intersections to the left of the clip extents. |
michael@0 | 832 | + * This assumes that all vertical edges on or at the left |
michael@0 | 833 | + * side of the clip rectangle have been shifted slightly |
michael@0 | 834 | + * to the left in polygon_add_edge(). */ |
michael@0 | 835 | + if (prev_x >= xmin || x.quo >= xmin || e->x.quo >= xmin) |
michael@0 | 836 | return 0; |
michael@0 | 837 | + } |
michael@0 | 838 | + else { |
michael@0 | 839 | prev_x = x.quo; |
michael@0 | 840 | - e = e->next; |
michael@0 | 841 | } |
michael@0 | 842 | - return 1; |
michael@0 | 843 | + e = e->next; |
michael@0 | 844 | } |
michael@0 | 845 | - return 0; |
michael@0 | 846 | + |
michael@0 | 847 | + return 1; |
michael@0 | 848 | } |
michael@0 | 849 | |
michael@0 | 850 | /* Merges edges on the given subpixel row from the polygon to the |
michael@0 | 851 | @@ -1261,7 +1327,7 @@ active_list_merge_edges_from_polygon( |
michael@0 | 852 | unsigned ix = EDGE_Y_BUCKET_INDEX(y, polygon->ymin); |
michael@0 | 853 | int min_height = active->min_height; |
michael@0 | 854 | struct edge *subrow_edges = NULL; |
michael@0 | 855 | - struct edge **ptail = &polygon->y_buckets[ix]; |
michael@0 | 856 | + struct edge **ptail = &polygon->y_buckets[ix].edges; |
michael@0 | 857 | |
michael@0 | 858 | while (1) { |
michael@0 | 859 | struct edge *tail = *ptail; |
michael@0 | 860 | @@ -1277,8 +1343,10 @@ active_list_merge_edges_from_polygon( |
michael@0 | 861 | ptail = &tail->next; |
michael@0 | 862 | } |
michael@0 | 863 | } |
michael@0 | 864 | - active->head = merge_unsorted_edges(active->head, subrow_edges); |
michael@0 | 865 | - active->min_height = min_height; |
michael@0 | 866 | + if (subrow_edges) { |
michael@0 | 867 | + active->head = merge_unsorted_edges(active->head, subrow_edges); |
michael@0 | 868 | + active->min_height = min_height; |
michael@0 | 869 | + } |
michael@0 | 870 | } |
michael@0 | 871 | |
michael@0 | 872 | /* Advance the edges on the active list by one subsample row by |
michael@0 | 873 | @@ -1439,11 +1507,13 @@ apply_nonzero_fill_rule_and_step_edges (struct active_list *active, |
michael@0 | 874 | } |
michael@0 | 875 | } |
michael@0 | 876 | |
michael@0 | 877 | - right_edge->x.quo += right_edge->dxdy_full.quo; |
michael@0 | 878 | - right_edge->x.rem += right_edge->dxdy_full.rem; |
michael@0 | 879 | - if (right_edge->x.rem >= 0) { |
michael@0 | 880 | - ++right_edge->x.quo; |
michael@0 | 881 | - right_edge->x.rem -= right_edge->dy; |
michael@0 | 882 | + if (! right_edge->vertical) { |
michael@0 | 883 | + right_edge->x.quo += right_edge->dxdy_full.quo; |
michael@0 | 884 | + right_edge->x.rem += right_edge->dxdy_full.rem; |
michael@0 | 885 | + if (right_edge->x.rem >= 0) { |
michael@0 | 886 | + ++right_edge->x.quo; |
michael@0 | 887 | + right_edge->x.rem -= right_edge->dy; |
michael@0 | 888 | + } |
michael@0 | 889 | } |
michael@0 | 890 | } |
michael@0 | 891 | |
michael@0 | 892 | @@ -1472,6 +1542,7 @@ apply_evenodd_fill_rule_and_step_edges (struct active_list *active, |
michael@0 | 893 | left_edge = *cursor; |
michael@0 | 894 | while (NULL != left_edge) { |
michael@0 | 895 | struct edge *right_edge; |
michael@0 | 896 | + int winding = left_edge->dir; |
michael@0 | 897 | |
michael@0 | 898 | left_edge->height_left -= GRID_Y; |
michael@0 | 899 | if (left_edge->height_left) |
michael@0 | 900 | @@ -1490,17 +1561,22 @@ apply_evenodd_fill_rule_and_step_edges (struct active_list *active, |
michael@0 | 901 | else |
michael@0 | 902 | *cursor = right_edge->next; |
michael@0 | 903 | |
michael@0 | 904 | + winding += right_edge->dir; |
michael@0 | 905 | + if ((winding & 1) == 0) { |
michael@0 | 906 | if (right_edge->next == NULL || |
michael@0 | 907 | right_edge->next->x.quo != right_edge->x.quo) |
michael@0 | 908 | { |
michael@0 | 909 | break; |
michael@0 | 910 | } |
michael@0 | 911 | + } |
michael@0 | 912 | |
michael@0 | 913 | - right_edge->x.quo += right_edge->dxdy_full.quo; |
michael@0 | 914 | - right_edge->x.rem += right_edge->dxdy_full.rem; |
michael@0 | 915 | - if (right_edge->x.rem >= 0) { |
michael@0 | 916 | - ++right_edge->x.quo; |
michael@0 | 917 | - right_edge->x.rem -= right_edge->dy; |
michael@0 | 918 | + if (! right_edge->vertical) { |
michael@0 | 919 | + right_edge->x.quo += right_edge->dxdy_full.quo; |
michael@0 | 920 | + right_edge->x.rem += right_edge->dxdy_full.rem; |
michael@0 | 921 | + if (right_edge->x.rem >= 0) { |
michael@0 | 922 | + ++right_edge->x.quo; |
michael@0 | 923 | + right_edge->x.rem -= right_edge->dy; |
michael@0 | 924 | + } |
michael@0 | 925 | } |
michael@0 | 926 | } |
michael@0 | 927 | |
michael@0 | 928 | @@ -1537,8 +1613,14 @@ blit_span( |
michael@0 | 929 | } |
michael@0 | 930 | } |
michael@0 | 931 | |
michael@0 | 932 | -#define GLITTER_BLIT_COVERAGES(coverages, y, xmin, xmax) \ |
michael@0 | 933 | - blit_cells(coverages, raster_pixels + (y)*raster_stride, xmin, xmax) |
michael@0 | 934 | +#define GLITTER_BLIT_COVERAGES(coverages, y, height, xmin, xmax) \ |
michael@0 | 935 | + do { \ |
michael@0 | 936 | + int __y = y; \ |
michael@0 | 937 | + int __h = height; \ |
michael@0 | 938 | + do { \ |
michael@0 | 939 | + blit_cells(coverages, raster_pixels + (__y)*raster_stride, xmin, xmax); \ |
michael@0 | 940 | + } while (--__h); \ |
michael@0 | 941 | + } while (0) |
michael@0 | 942 | |
michael@0 | 943 | static void |
michael@0 | 944 | blit_cells( |
michael@0 | 945 | @@ -1597,7 +1679,6 @@ static void |
michael@0 | 946 | _glitter_scan_converter_fini(glitter_scan_converter_t *converter) |
michael@0 | 947 | { |
michael@0 | 948 | polygon_fini(converter->polygon); |
michael@0 | 949 | - active_list_fini(converter->active); |
michael@0 | 950 | cell_list_fini(converter->coverages); |
michael@0 | 951 | converter->xmin=0; |
michael@0 | 952 | converter->ymin=0; |
michael@0 | 953 | @@ -1641,7 +1722,7 @@ glitter_scan_converter_reset( |
michael@0 | 954 | |
michael@0 | 955 | active_list_reset(converter->active); |
michael@0 | 956 | cell_list_reset(converter->coverages); |
michael@0 | 957 | - status = polygon_reset(converter->polygon, ymin, ymax); |
michael@0 | 958 | + status = polygon_reset(converter->polygon, xmin, xmax, ymin, ymax); |
michael@0 | 959 | if (status) |
michael@0 | 960 | return status; |
michael@0 | 961 | |
michael@0 | 962 | @@ -1711,19 +1792,48 @@ glitter_scan_converter_add_edge (glitter_scan_converter_t *converter, |
michael@0 | 963 | #endif |
michael@0 | 964 | |
michael@0 | 965 | #ifndef GLITTER_BLIT_COVERAGES_EMPTY |
michael@0 | 966 | -# define GLITTER_BLIT_COVERAGES_EMPTY(y, xmin, xmax) |
michael@0 | 967 | +# define GLITTER_BLIT_COVERAGES_EMPTY(y0, y1, xmin, xmax) |
michael@0 | 968 | #endif |
michael@0 | 969 | |
michael@0 | 970 | +static cairo_bool_t |
michael@0 | 971 | +active_list_is_vertical (struct active_list *active) |
michael@0 | 972 | +{ |
michael@0 | 973 | + struct edge *e; |
michael@0 | 974 | + |
michael@0 | 975 | + for (e = active->head; e != NULL; e = e->next) { |
michael@0 | 976 | + if (! e->vertical) |
michael@0 | 977 | + return FALSE; |
michael@0 | 978 | + } |
michael@0 | 979 | + |
michael@0 | 980 | + return TRUE; |
michael@0 | 981 | +} |
michael@0 | 982 | + |
michael@0 | 983 | +static void |
michael@0 | 984 | +step_edges (struct active_list *active, int count) |
michael@0 | 985 | +{ |
michael@0 | 986 | + struct edge **cursor = &active->head; |
michael@0 | 987 | + struct edge *edge; |
michael@0 | 988 | + |
michael@0 | 989 | + for (edge = *cursor; edge != NULL; edge = *cursor) { |
michael@0 | 990 | + edge->height_left -= GRID_Y * count; |
michael@0 | 991 | + if (edge->height_left) |
michael@0 | 992 | + cursor = &edge->next; |
michael@0 | 993 | + else |
michael@0 | 994 | + *cursor = edge->next; |
michael@0 | 995 | + } |
michael@0 | 996 | +} |
michael@0 | 997 | + |
michael@0 | 998 | I glitter_status_t |
michael@0 | 999 | glitter_scan_converter_render( |
michael@0 | 1000 | glitter_scan_converter_t *converter, |
michael@0 | 1001 | int nonzero_fill, |
michael@0 | 1002 | GLITTER_BLIT_COVERAGES_ARGS) |
michael@0 | 1003 | { |
michael@0 | 1004 | - int i; |
michael@0 | 1005 | + int i, j; |
michael@0 | 1006 | int ymax_i = converter->ymax / GRID_Y; |
michael@0 | 1007 | int ymin_i = converter->ymin / GRID_Y; |
michael@0 | 1008 | int xmin_i, xmax_i; |
michael@0 | 1009 | + grid_scaled_x_t xmin = converter->xmin; |
michael@0 | 1010 | int h = ymax_i - ymin_i; |
michael@0 | 1011 | struct polygon *polygon = converter->polygon; |
michael@0 | 1012 | struct cell_list *coverages = converter->coverages; |
michael@0 | 1013 | @@ -1738,22 +1848,28 @@ glitter_scan_converter_render( |
michael@0 | 1014 | GLITTER_BLIT_COVERAGES_BEGIN; |
michael@0 | 1015 | |
michael@0 | 1016 | /* Render each pixel row. */ |
michael@0 | 1017 | - for (i=0; i<h; i++) { |
michael@0 | 1018 | + for (i = 0; i < h; i = j) { |
michael@0 | 1019 | int do_full_step = 0; |
michael@0 | 1020 | glitter_status_t status = 0; |
michael@0 | 1021 | |
michael@0 | 1022 | + j = i + 1; |
michael@0 | 1023 | + |
michael@0 | 1024 | /* Determine if we can ignore this row or use the full pixel |
michael@0 | 1025 | * stepper. */ |
michael@0 | 1026 | - if (GRID_Y == EDGE_Y_BUCKET_HEIGHT && ! polygon->y_buckets[i]) { |
michael@0 | 1027 | + if (polygon->y_buckets[i].edges == NULL) { |
michael@0 | 1028 | if (! active->head) { |
michael@0 | 1029 | - GLITTER_BLIT_COVERAGES_EMPTY (i+ymin_i, xmin_i, xmax_i); |
michael@0 | 1030 | + for (; j < h && ! polygon->y_buckets[j].edges; j++) |
michael@0 | 1031 | + ; |
michael@0 | 1032 | + GLITTER_BLIT_COVERAGES_EMPTY (i+ymin_i, j-i, xmin_i, xmax_i); |
michael@0 | 1033 | continue; |
michael@0 | 1034 | } |
michael@0 | 1035 | - |
michael@0 | 1036 | - do_full_step = active_list_can_step_full_row (active); |
michael@0 | 1037 | + do_full_step = active_list_can_step_full_row (active, xmin); |
michael@0 | 1038 | + } |
michael@0 | 1039 | + else if (! polygon->y_buckets[i].have_inside_edges) { |
michael@0 | 1040 | + grid_scaled_y_t y = (i+ymin_i)*GRID_Y; |
michael@0 | 1041 | + active_list_merge_edges_from_polygon (active, y, polygon); |
michael@0 | 1042 | + do_full_step = active_list_can_step_full_row (active, xmin); |
michael@0 | 1043 | } |
michael@0 | 1044 | - |
michael@0 | 1045 | - cell_list_reset (coverages); |
michael@0 | 1046 | |
michael@0 | 1047 | if (do_full_step) { |
michael@0 | 1048 | /* Step by a full pixel row's worth. */ |
michael@0 | 1049 | @@ -1764,8 +1880,20 @@ glitter_scan_converter_render( |
michael@0 | 1050 | status = apply_evenodd_fill_rule_and_step_edges (active, |
michael@0 | 1051 | coverages); |
michael@0 | 1052 | } |
michael@0 | 1053 | + |
michael@0 | 1054 | + if (active_list_is_vertical (active)) { |
michael@0 | 1055 | + while (j < h && |
michael@0 | 1056 | + polygon->y_buckets[j].edges == NULL && |
michael@0 | 1057 | + active->min_height >= 2*GRID_Y) |
michael@0 | 1058 | + { |
michael@0 | 1059 | + active->min_height -= GRID_Y; |
michael@0 | 1060 | + j++; |
michael@0 | 1061 | + } |
michael@0 | 1062 | + if (j != i + 1) |
michael@0 | 1063 | + step_edges (active, j - (i + 1)); |
michael@0 | 1064 | + } |
michael@0 | 1065 | } else { |
michael@0 | 1066 | - /* Subsample this row. */ |
michael@0 | 1067 | + /* Supersample this row. */ |
michael@0 | 1068 | grid_scaled_y_t suby; |
michael@0 | 1069 | for (suby = 0; suby < GRID_Y; suby++) { |
michael@0 | 1070 | grid_scaled_y_t y = (i+ymin_i)*GRID_Y + suby; |
michael@0 | 1071 | @@ -1787,13 +1915,13 @@ glitter_scan_converter_render( |
michael@0 | 1072 | if (unlikely (status)) |
michael@0 | 1073 | return status; |
michael@0 | 1074 | |
michael@0 | 1075 | - GLITTER_BLIT_COVERAGES(coverages, i+ymin_i, xmin_i, xmax_i); |
michael@0 | 1076 | + GLITTER_BLIT_COVERAGES(coverages, i+ymin_i, j-i, xmin_i, xmax_i); |
michael@0 | 1077 | + cell_list_reset (coverages); |
michael@0 | 1078 | |
michael@0 | 1079 | - if (! active->head) { |
michael@0 | 1080 | + if (! active->head) |
michael@0 | 1081 | active->min_height = INT_MAX; |
michael@0 | 1082 | - } else { |
michael@0 | 1083 | + else |
michael@0 | 1084 | active->min_height -= GRID_Y; |
michael@0 | 1085 | - } |
michael@0 | 1086 | } |
michael@0 | 1087 | |
michael@0 | 1088 | /* Clean up the coverage blitter. */ |
michael@0 | 1089 | @@ -1807,21 +1935,20 @@ glitter_scan_converter_render( |
michael@0 | 1090 | * scan converter subclass. */ |
michael@0 | 1091 | |
michael@0 | 1092 | static glitter_status_t |
michael@0 | 1093 | -blit_with_span_renderer( |
michael@0 | 1094 | - struct cell_list *cells, |
michael@0 | 1095 | - cairo_span_renderer_t *renderer, |
michael@0 | 1096 | - struct pool *span_pool, |
michael@0 | 1097 | - int y, |
michael@0 | 1098 | - int xmin, |
michael@0 | 1099 | - int xmax) |
michael@0 | 1100 | +blit_with_span_renderer (struct cell_list *cells, |
michael@0 | 1101 | + cairo_span_renderer_t *renderer, |
michael@0 | 1102 | + struct pool *span_pool, |
michael@0 | 1103 | + int y, int height, |
michael@0 | 1104 | + int xmin, int xmax) |
michael@0 | 1105 | { |
michael@0 | 1106 | struct cell *cell = cells->head; |
michael@0 | 1107 | int prev_x = xmin; |
michael@0 | 1108 | int cover = 0; |
michael@0 | 1109 | cairo_half_open_span_t *spans; |
michael@0 | 1110 | unsigned num_spans; |
michael@0 | 1111 | + |
michael@0 | 1112 | if (cell == NULL) |
michael@0 | 1113 | - return CAIRO_STATUS_SUCCESS; |
michael@0 | 1114 | + return blit_empty_with_span_renderer (renderer, y, height); |
michael@0 | 1115 | |
michael@0 | 1116 | /* Skip cells to the left of the clip region. */ |
michael@0 | 1117 | while (cell != NULL && cell->x < xmin) { |
michael@0 | 1118 | @@ -1833,12 +1960,12 @@ blit_with_span_renderer( |
michael@0 | 1119 | /* Count number of cells remaining. */ |
michael@0 | 1120 | { |
michael@0 | 1121 | struct cell *next = cell; |
michael@0 | 1122 | - num_spans = 0; |
michael@0 | 1123 | - while (next) { |
michael@0 | 1124 | + num_spans = 1; |
michael@0 | 1125 | + while (next != NULL) { |
michael@0 | 1126 | next = next->next; |
michael@0 | 1127 | ++num_spans; |
michael@0 | 1128 | } |
michael@0 | 1129 | - num_spans = 2*num_spans + 1; |
michael@0 | 1130 | + num_spans = 2*num_spans; |
michael@0 | 1131 | } |
michael@0 | 1132 | |
michael@0 | 1133 | /* Allocate enough spans for the row. */ |
michael@0 | 1134 | @@ -1853,6 +1980,7 @@ blit_with_span_renderer( |
michael@0 | 1135 | for (; cell != NULL; cell = cell->next) { |
michael@0 | 1136 | int x = cell->x; |
michael@0 | 1137 | int area; |
michael@0 | 1138 | + |
michael@0 | 1139 | if (x >= xmax) |
michael@0 | 1140 | break; |
michael@0 | 1141 | |
michael@0 | 1142 | @@ -1872,20 +2000,26 @@ blit_with_span_renderer( |
michael@0 | 1143 | prev_x = x+1; |
michael@0 | 1144 | } |
michael@0 | 1145 | |
michael@0 | 1146 | - if (prev_x < xmax) { |
michael@0 | 1147 | + if (prev_x <= xmax) { |
michael@0 | 1148 | spans[num_spans].x = prev_x; |
michael@0 | 1149 | spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover); |
michael@0 | 1150 | ++num_spans; |
michael@0 | 1151 | } |
michael@0 | 1152 | |
michael@0 | 1153 | + if (prev_x < xmax && cover) { |
michael@0 | 1154 | + spans[num_spans].x = xmax; |
michael@0 | 1155 | + spans[num_spans].coverage = 0; |
michael@0 | 1156 | + ++num_spans; |
michael@0 | 1157 | + } |
michael@0 | 1158 | + |
michael@0 | 1159 | /* Dump them into the renderer. */ |
michael@0 | 1160 | - return renderer->render_row (renderer, y, spans, num_spans); |
michael@0 | 1161 | + return renderer->render_rows (renderer, y, height, spans, num_spans); |
michael@0 | 1162 | } |
michael@0 | 1163 | |
michael@0 | 1164 | static glitter_status_t |
michael@0 | 1165 | -blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y) |
michael@0 | 1166 | +blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y, int height) |
michael@0 | 1167 | { |
michael@0 | 1168 | - return renderer->render_row (renderer, y, NULL, 0); |
michael@0 | 1169 | + return renderer->render_rows (renderer, y, height, NULL, 0); |
michael@0 | 1170 | } |
michael@0 | 1171 | |
michael@0 | 1172 | struct _cairo_tor_scan_converter { |
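
The cairo-tor-scan-converter.c portion above is what exploits the new multi-row interface: runs of rows whose y-buckets are empty and have no active edges collapse into a single empty blit, and when every active edge is vertical the coverage of one pixel row repeats unchanged, so the converter widens the run over the following empty buckets, retires the edge heights in one go with step_edges(), and blits the accumulated cells with height j-i. The following is a hypothetical, heavily simplified skeleton of that outer loop; all names here are stand-ins, not the cairo ones, and the real code also checks active->min_height before extending a run.

typedef struct { int has_new_edges; } bucket_t;

static void
scan (const bucket_t *buckets, int nrows, int ymin,
      int (*row_has_active_edges) (void),
      int (*row_is_vertical_only) (void),
      void (*render_row) (int y),
      void (*blit_rows) (int y, int height))
{
    int i, j;

    for (i = 0; i < nrows; i = j) {
        j = i + 1;

        if (!buckets[i].has_new_edges && !row_has_active_edges ()) {
            /* Nothing to draw: widen the run over the following empty
             * buckets and emit a single empty blit for all of them. */
            for (; j < nrows && !buckets[j].has_new_edges; j++)
                ;
            blit_rows (i + ymin, j - i);
            continue;
        }

        render_row (i + ymin);

        /* With only vertical active edges this row's coverage repeats
         * unchanged, so the same cells can be blitted for the following
         * empty-bucket rows as well. */
        if (row_is_vertical_only ()) {
            while (j < nrows && !buckets[j].has_new_edges)
                j++;
        }

        blit_rows (i + ymin, j - i);
    }
}
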
michael@0 | 1173 | diff --git a/src/cairo-win32-surface.c b/src/cairo-win32-surface.c |
michael@0 | 1174 | index 82d1cf5..d4575a3 100644 |
michael@0 | 1175 | --- a/src/cairo-win32-surface.c |
michael@0 | 1176 | +++ b/src/cairo-win32-surface.c |
michael@0 | 1177 | @@ -1954,6 +1954,9 @@ typedef struct _cairo_win32_surface_span_renderer { |
michael@0 | 1178 | const cairo_pattern_t *pattern; |
michael@0 | 1179 | cairo_antialias_t antialias; |
michael@0 | 1180 | |
michael@0 | 1181 | + uint8_t *mask_data; |
michael@0 | 1182 | + uint32_t mask_stride; |
michael@0 | 1183 | + |
michael@0 | 1184 | cairo_image_surface_t *mask; |
michael@0 | 1185 | cairo_win32_surface_t *dst; |
michael@0 | 1186 | cairo_region_t *clip_region; |
michael@0 | 1187 | @@ -1962,14 +1965,16 @@ typedef struct _cairo_win32_surface_span_renderer { |
michael@0 | 1188 | } cairo_win32_surface_span_renderer_t; |
michael@0 | 1189 | |
michael@0 | 1190 | static cairo_status_t |
michael@0 | 1191 | -_cairo_win32_surface_span_renderer_render_row ( |
michael@0 | 1192 | +_cairo_win32_surface_span_renderer_render_rows ( |
michael@0 | 1193 | void *abstract_renderer, |
michael@0 | 1194 | int y, |
michael@0 | 1195 | + int height, |
michael@0 | 1196 | const cairo_half_open_span_t *spans, |
michael@0 | 1197 | unsigned num_spans) |
michael@0 | 1198 | { |
michael@0 | 1199 | cairo_win32_surface_span_renderer_t *renderer = abstract_renderer; |
michael@0 | 1200 | - _cairo_image_surface_span_render_row (y, spans, num_spans, renderer->mask, &renderer->composite_rectangles); |
michael@0 | 1201 | + while (height--) |
michael@0 | 1202 | + _cairo_image_surface_span_render_row (y++, spans, num_spans, renderer->mask_data, renderer->mask_stride); |
michael@0 | 1203 | return CAIRO_STATUS_SUCCESS; |
michael@0 | 1204 | } |
michael@0 | 1205 | |
michael@0 | 1206 | @@ -2066,8 +2071,7 @@ _cairo_win32_surface_create_span_renderer (cairo_operator_t op, |
michael@0 | 1207 | |
michael@0 | 1208 | renderer->base.destroy = _cairo_win32_surface_span_renderer_destroy; |
michael@0 | 1209 | renderer->base.finish = _cairo_win32_surface_span_renderer_finish; |
michael@0 | 1210 | - renderer->base.render_row = |
michael@0 | 1211 | - _cairo_win32_surface_span_renderer_render_row; |
michael@0 | 1212 | + renderer->base.render_rows = _cairo_win32_surface_span_renderer_render_rows; |
michael@0 | 1213 | renderer->op = op; |
michael@0 | 1214 | renderer->pattern = pattern; |
michael@0 | 1215 | renderer->antialias = antialias; |
michael@0 | 1216 | @@ -2088,6 +2092,9 @@ _cairo_win32_surface_create_span_renderer (cairo_operator_t op, |
michael@0 | 1217 | _cairo_win32_surface_span_renderer_destroy (renderer); |
michael@0 | 1218 | return _cairo_span_renderer_create_in_error (status); |
michael@0 | 1219 | } |
michael@0 | 1220 | + |
michael@0 | 1221 | + renderer->mask_data = renderer->mask->data - rects->mask.x - rects->mask.y * renderer->mask->stride; |
michael@0 | 1222 | + renderer->mask_stride = renderer->mask->stride; |
michael@0 | 1223 | return &renderer->base; |
michael@0 | 1224 | } |
michael@0 | 1225 | |
michael@0 | 1226 | diff --git a/src/cairo-xlib-display.c b/src/cairo-xlib-display.c |
michael@0 | 1227 | index a7a40b8..566d9fb 100644 |
michael@0 | 1228 | --- a/src/cairo-xlib-display.c |
michael@0 | 1229 | +++ b/src/cairo-xlib-display.c |
michael@0 | 1230 | @@ -407,6 +407,10 @@ _cairo_xlib_display_get (Display *dpy, |
michael@0 | 1231 | display->buggy_pad_reflect = TRUE; |
michael@0 | 1232 | } |
michael@0 | 1233 | |
michael@0 | 1234 | + /* gradients don't seem to work */ |
michael@0 | 1235 | + display->buggy_gradients = TRUE; |
michael@0 | 1236 | + |
michael@0 | 1237 | + |
michael@0 | 1238 | /* XXX workaround; see https://bugzilla.mozilla.org/show_bug.cgi?id=413583 */ |
michael@0 | 1239 | /* If buggy_repeat_force == -1, then initialize. |
michael@0 | 1240 | * - set to -2, meaning "nothing was specified", and we trust the above detection. |
michael@0 | 1241 | diff --git a/src/cairoint.h b/src/cairoint.h |
michael@0 | 1242 | index 58850ab..1cdf6ff 100644 |
michael@0 | 1243 | --- a/src/cairoint.h |
michael@0 | 1244 | +++ b/src/cairoint.h |
michael@0 | 1245 | @@ -2257,8 +2257,8 @@ cairo_private void |
michael@0 | 1246 | _cairo_image_surface_span_render_row (int y, |
michael@0 | 1247 | const cairo_half_open_span_t *spans, |
michael@0 | 1248 | unsigned num_spans, |
michael@0 | 1249 | - cairo_image_surface_t *mask, |
michael@0 | 1250 | - const cairo_composite_rectangles_t *rects); |
michael@0 | 1251 | + uint8_t *data, |
michael@0 | 1252 | + uint32_t stride); |
michael@0 | 1253 | |
michael@0 | 1254 | cairo_private cairo_image_transparency_t |
michael@0 | 1255 | _cairo_image_analyze_transparency (cairo_image_surface_t *image); |