diff --git a/src/cairo-gl-surface.c b/src/cairo-gl-surface.c
index 2acc8b5..019249e 100644
--- a/src/cairo-gl-surface.c
+++ b/src/cairo-gl-surface.c
@@ -2012,13 +2012,14 @@ typedef struct _cairo_gl_surface_span_renderer {

     cairo_gl_composite_setup_t setup;

+    int xmin, xmax;
+
     cairo_operator_t op;
     cairo_antialias_t antialias;

     cairo_gl_surface_t *dst;
     cairo_region_t *clip;

-    cairo_composite_rectangles_t composite_rectangles;
     GLuint vbo;
     void *vbo_base;
     unsigned int vbo_size;
@@ -2049,11 +2050,11 @@ _cairo_gl_span_renderer_flush (cairo_gl_surface_span_renderer_t *renderer)
	    cairo_region_get_rectangle (renderer->clip, i, &rect);

	    glScissor (rect.x, rect.y, rect.width, rect.height);
-	    glDrawArrays (GL_LINES, 0, count);
+	    glDrawArrays (GL_QUADS, 0, count);
	}
	glDisable (GL_SCISSOR_TEST);
    } else {
-	glDrawArrays (GL_LINES, 0, count);
+	glDrawArrays (GL_QUADS, 0, count);
    }
 }

@@ -2134,72 +2135,87 @@ _cairo_gl_emit_span_vertex (cairo_gl_surface_span_renderer_t *renderer,

 static void
 _cairo_gl_emit_span (cairo_gl_surface_span_renderer_t *renderer,
-		     int x1, int x2, int y, uint8_t alpha)
+		     int x, int y1, int y2,
+		     uint8_t alpha)
 {
     float *vertices = _cairo_gl_span_renderer_get_vbo (renderer, 2);

-    _cairo_gl_emit_span_vertex (renderer, x1, y, alpha, vertices);
-    _cairo_gl_emit_span_vertex (renderer, x2, y, alpha,
+    _cairo_gl_emit_span_vertex (renderer, x, y1, alpha, vertices);
+    _cairo_gl_emit_span_vertex (renderer, x, y2, alpha,
				vertices + renderer->vertex_size / 4);
 }

-/* Emits the contents of the span renderer rows as GL_LINES with the span's
- * alpha.
- *
- * Unlike the image surface, which is compositing into a temporary, we emit
- * coverage even for alpha == 0, in case we're using an unbounded operator.
- * But it means we avoid having to do the fixup.
- */
+static void
+_cairo_gl_emit_rectangle (cairo_gl_surface_span_renderer_t *renderer,
+			  int x1, int y1,
+			  int x2, int y2,
+			  int coverage)
+{
+    _cairo_gl_emit_span (renderer, x1, y1, y2, coverage);
+    _cairo_gl_emit_span (renderer, x2, y2, y1, coverage);
+}
+
 static cairo_status_t
-_cairo_gl_surface_span_renderer_render_row (
-    void *abstract_renderer,
-    int y,
-    const cairo_half_open_span_t *spans,
-    unsigned num_spans)
+_cairo_gl_render_bounded_spans (void *abstract_renderer,
+				int y, int height,
+				const cairo_half_open_span_t *spans,
+				unsigned num_spans)
 {
     cairo_gl_surface_span_renderer_t *renderer = abstract_renderer;
-    int xmin = renderer->composite_rectangles.mask.x;
-    int xmax = xmin + renderer->composite_rectangles.width;
-    int prev_x = xmin;
-    int prev_alpha = 0;
-    unsigned i;
-    int x_translate;
-
-    /* Make sure we're within y-range. */
-    if (y < renderer->composite_rectangles.mask.y ||
-	y >= renderer->composite_rectangles.mask.y +
-	renderer->composite_rectangles.height)
+
+    if (num_spans == 0)
	return CAIRO_STATUS_SUCCESS;

-    x_translate = renderer->composite_rectangles.dst.x -
-	renderer->composite_rectangles.mask.x;
-    y += renderer->composite_rectangles.dst.y -
-	renderer->composite_rectangles.mask.y;
+    do {
+	if (spans[0].coverage) {
+	    _cairo_gl_emit_rectangle (renderer,
+				      spans[0].x, y,
+				      spans[1].x, y + height,
+				      spans[0].coverage);
+	}

-    /* Find the first span within x-range. */
-    for (i=0; i < num_spans && spans[i].x < xmin; i++) {}
-    if (i>0)
-	prev_alpha = spans[i-1].coverage;
+	spans++;
+    } while (--num_spans > 1);

-    /* Set the intermediate spans. */
-    for (; i < num_spans; i++) {
-	int x = spans[i].x;
+    return CAIRO_STATUS_SUCCESS;
+}

-	if (x >= xmax)
-	    break;
+static cairo_status_t
+_cairo_gl_render_unbounded_spans (void *abstract_renderer,
+				  int y, int height,
+				  const cairo_half_open_span_t *spans,
+				  unsigned num_spans)
+{
+    cairo_gl_surface_span_renderer_t *renderer = abstract_renderer;

-	_cairo_gl_emit_span (renderer,
-			     prev_x + x_translate, x + x_translate, y,
-			     prev_alpha);
+    if (num_spans == 0) {
+	_cairo_gl_emit_rectangle (renderer,
+				  renderer->xmin, y,
+				  renderer->xmax, y + height,
+				  0);
+	return CAIRO_STATUS_SUCCESS;
+    }

-	prev_x = x;
-	prev_alpha = spans[i].coverage;
+    if (spans[0].x != renderer->xmin) {
+	_cairo_gl_emit_rectangle (renderer,
+				  renderer->xmin, y,
+				  spans[0].x, y + height,
+				  0);
    }

-    if (prev_x < xmax) {
-	_cairo_gl_emit_span (renderer,
-			     prev_x + x_translate, xmax + x_translate, y,
-			     prev_alpha);
+    do {
+	_cairo_gl_emit_rectangle (renderer,
+				  spans[0].x, y,
+				  spans[1].x, y + height,
+				  spans[0].coverage);
+	spans++;
+    } while (--num_spans > 1);
+
+    if (spans[0].x != renderer->xmax) {
+	_cairo_gl_emit_rectangle (renderer,
+				  spans[0].x, y,
+				  renderer->xmax, y + height,
+				  0);
    }

     return CAIRO_STATUS_SUCCESS;
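For reviewers unfamiliar with the new GL path: each half-open span now becomes an axis-aligned rectangle of constant coverage, drawn with GL_QUADS. Below is a standalone sketch (not part of the patch; the vertex layout is simplified to x, y, alpha) showing how the two `_cairo_gl_emit_span` calls in `_cairo_gl_emit_rectangle` produce the four vertices of one quad in winding order:

```c
#include <stdio.h>
#include <stdint.h>

typedef struct { float x, y; uint8_t alpha; } vertex_t;

static vertex_t vbo[4];
static int nvertices;

static void emit_vertex (int x, int y, uint8_t alpha)
{
    vbo[nvertices].x = x;
    vbo[nvertices].y = y;
    vbo[nvertices].alpha = alpha;
    nvertices++;
}

/* Mirrors _cairo_gl_emit_span: one vertical edge, two vertices. */
static void emit_span (int x, int y1, int y2, uint8_t alpha)
{
    emit_vertex (x, y1, alpha);
    emit_vertex (x, y2, alpha);
}

/* Mirrors _cairo_gl_emit_rectangle: swapping y1/y2 on the second edge
 * yields the closed loop (x1,y1) (x1,y2) (x2,y2) (x2,y1), which
 * GL_QUADS consumes four vertices at a time. */
static void emit_rectangle (int x1, int y1, int x2, int y2, uint8_t coverage)
{
    emit_span (x1, y1, y2, coverage);
    emit_span (x2, y2, y1, coverage);
}

int main (void)
{
    emit_rectangle (10, 20, 30, 24, 255);
    for (int i = 0; i < nvertices; i++)
	printf ("(%g, %g) alpha=%d\n", vbo[i].x, vbo[i].y, vbo[i].alpha);
    return 0;
}
```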
@@ -2274,8 +2290,6 @@ _cairo_gl_surface_create_span_renderer (cairo_operator_t op,
     cairo_gl_surface_t *dst = abstract_dst;
     cairo_gl_surface_span_renderer_t *renderer;
     cairo_status_t status;
-    int width = rects->width;
-    int height = rects->height;
     cairo_surface_attributes_t *src_attributes;
     GLenum err;

diff --git a/src/cairo-image-surface.c b/src/cairo-image-surface.c
index 48d8013..d52979d 100644
--- a/src/cairo-image-surface.c
+++ b/src/cairo-image-surface.c
@@ -1390,11 +1390,13 @@ typedef struct _cairo_image_surface_span_renderer {
     const cairo_pattern_t *pattern;
     cairo_antialias_t antialias;

+    uint8_t *mask_data;
+    uint32_t mask_stride;
+
     cairo_image_surface_t *src;
     cairo_surface_attributes_t src_attributes;
     cairo_image_surface_t *mask;
     cairo_image_surface_t *dst;
-
     cairo_composite_rectangles_t composite_rectangles;
 } cairo_image_surface_span_renderer_t;

@@ -1403,66 +1405,46 @@ _cairo_image_surface_span_render_row (
     int y,
     const cairo_half_open_span_t *spans,
     unsigned num_spans,
-    cairo_image_surface_t *mask,
-    const cairo_composite_rectangles_t *rects)
+    uint8_t *data,
+    uint32_t stride)
 {
-    int xmin = rects->mask.x;
-    int xmax = xmin + rects->width;
     uint8_t *row;
-    int prev_x = xmin;
-    int prev_alpha = 0;
     unsigned i;

-    /* Make sure we're within y-range. */
-    y -= rects->mask.y;
-    if (y < 0 || y >= rects->height)
+    if (num_spans == 0)
	return;

-    row = (uint8_t*)(mask->data) + y*(size_t)mask->stride - xmin;
-
-    /* Find the first span within x-range. */
-    for (i=0; i < num_spans && spans[i].x < xmin; i++) {}
-    if (i>0)
-	prev_alpha = spans[i-1].coverage;
-
-    /* Set the intermediate spans. */
-    for (; i < num_spans; i++) {
-	int x = spans[i].x;
-
-	if (x >= xmax)
-	    break;
-
-	if (prev_alpha != 0) {
-	    /* We implement setting rendering the most common single
-	     * pixel wide span case to avoid the overhead of a memset
-	     * call. Open coding setting longer spans didn't show a
-	     * noticeable improvement over memset. */
-	    if (x == prev_x + 1) {
-		row[prev_x] = prev_alpha;
-	    }
-	    else {
-		memset(row + prev_x, prev_alpha, x - prev_x);
-	    }
+    row = data + y * stride;
+    for (i = 0; i < num_spans - 1; i++) {
+	if (! spans[i].coverage)
+	    continue;
+
+	/* We implement setting the most common single pixel wide
+	 * span case to avoid the overhead of a memset call.
+	 * Open coding setting longer spans didn't show a
+	 * noticeable improvement over memset.
+	 */
+	if (spans[i+1].x == spans[i].x + 1) {
+	    row[spans[i].x] = spans[i].coverage;
+	} else {
+	    memset (row + spans[i].x,
+		    spans[i].coverage,
+		    spans[i+1].x - spans[i].x);
	}
-
-	prev_x = x;
-	prev_alpha = spans[i].coverage;
-    }
-
-    if (prev_alpha != 0 && prev_x < xmax) {
-	memset(row + prev_x, prev_alpha, xmax - prev_x);
     }
 }

 static cairo_status_t
-_cairo_image_surface_span_renderer_render_row (
+_cairo_image_surface_span_renderer_render_rows (
     void *abstract_renderer,
     int y,
+    int height,
     const cairo_half_open_span_t *spans,
     unsigned num_spans)
 {
     cairo_image_surface_span_renderer_t *renderer = abstract_renderer;
-    _cairo_image_surface_span_render_row (y, spans, num_spans, renderer->mask, &renderer->composite_rectangles);
+    while (height--)
+	_cairo_image_surface_span_render_row (y++, spans, num_spans, renderer->mask_data, renderer->mask_stride);
     return CAIRO_STATUS_SUCCESS;
 }

@@ -1517,11 +1499,11 @@ _cairo_image_surface_span_renderer_finish (void *abstract_renderer)
					   &dst->base,
					   src_attributes,
					   src->width, src->height,
-					   rects->width, rects->height,
+					   width, height,
					   rects->src.x, rects->src.y,
					   0, 0, /* mask.x, mask.y */
					   rects->dst.x, rects->dst.y,
-					   rects->width, rects->height,
+					   width, height,
					   dst->clip_region);
	}
     }
@@ -1567,7 +1549,7 @@ _cairo_image_surface_create_span_renderer (cairo_operator_t op,

     renderer->base.destroy = _cairo_image_surface_span_renderer_destroy;
     renderer->base.finish = _cairo_image_surface_span_renderer_finish;
-    renderer->base.render_row = _cairo_image_surface_span_renderer_render_row;
+    renderer->base.render_rows = _cairo_image_surface_span_renderer_render_rows;
     renderer->op = op;
     renderer->pattern = pattern;
     renderer->antialias = antialias;
@@ -1604,6 +1586,9 @@ _cairo_image_surface_create_span_renderer (cairo_operator_t op,
	_cairo_image_surface_span_renderer_destroy (renderer);
	return _cairo_span_renderer_create_in_error (status);
     }
+
+    renderer->mask_data = renderer->mask->data - rects->mask.x - rects->mask.y * renderer->mask->stride;
+    renderer->mask_stride = renderer->mask->stride;
     return &renderer->base;
 }

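The image path relies on a pre-biased mask pointer: the composite origin is folded into `mask_data` once at creation, so `render_rows` can index the mask with untranslated destination coordinates. A standalone sketch with made-up values (the biased pointer is formally out of range until indexed back into the buffer, the same trick the patch uses in practice):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main (void)
{
    uint8_t mask[16 * 16];	    /* a 16x16 A8 mask surface */
    uint32_t stride = 16;
    int mask_x = 100, mask_y = 200; /* device origin of the mask extents */

    memset (mask, 0, sizeof (mask));

    /* The bias computed in _cairo_image_surface_create_span_renderer. */
    uint8_t *mask_data = mask - mask_x - mask_y * stride;

    /* A span pixel at device coordinates (x=103, y=205)... */
    int y = 205, x = 103;
    mask_data[y * stride + x] = 0xff;

    /* ...lands at mask-local (3, 5). */
    assert (mask[5 * 16 + 3] == 0xff);
    return 0;
}
```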
diff --git a/src/cairo-spans-private.h b/src/cairo-spans-private.h
index e29a567..af3b38c 100644
--- a/src/cairo-spans-private.h
+++ b/src/cairo-spans-private.h
@@ -47,26 +47,24 @@ typedef struct _cairo_half_open_span {
 * surfaces if they want to composite spans instead of trapezoids. */
typedef struct _cairo_span_renderer cairo_span_renderer_t;
struct _cairo_span_renderer {
+    /* Private status variable. */
+    cairo_status_t status;
+
    /* Called to destroy the renderer. */
    cairo_destroy_func_t destroy;

-    /* Render the spans on row y of the source by whatever compositing
-     * method is required. The function should ignore spans outside
-     * the bounding box set by the init() function. */
-    cairo_status_t (*render_row)(
-	void *abstract_renderer,
-	int y,
-	const cairo_half_open_span_t *coverages,
-	unsigned num_coverages);
+    /* Render the spans on rows y..(y + height - 1) of the destination
+     * by whatever compositing method is required. */
+    cairo_warn cairo_status_t
+    (*render_rows) (void *abstract_renderer,
+		    int y, int height,
+		    const cairo_half_open_span_t *coverages,
+		    unsigned num_coverages);

    /* Called after all rows have been rendered to perform whatever
     * final rendering step is required. This function is called just
     * once before the renderer is destroyed. */
-    cairo_status_t (*finish)(
-	void *abstract_renderer);
-
-    /* Private status variable. */
-    cairo_status_t status;
+    cairo_status_t (*finish) (void *abstract_renderer);
};

/* Scan converter interface. */
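For clarity, the `render_rows` contract: `coverages[i]` applies to x in [coverages[i].x, coverages[i+1].x), so the final entry only terminates the row, and `num_coverages == 0` denotes an empty row repeated `height` times. A minimal sketch with stand-in types:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t x; uint8_t coverage; } half_open_span_t;

static int
print_rows (void *closure, int y, int height,
	    const half_open_span_t *spans, unsigned num_spans)
{
    unsigned i;

    (void) closure;
    for (i = 0; i + 1 < num_spans; i++) {
	printf ("rows %d..%d: x in [%d, %d) -> coverage %u\n",
		y, y + height - 1,
		spans[i].x, spans[i+1].x, spans[i].coverage);
    }
    return 0;
}

int main (void)
{
    /* Two runs: [2,5) at full coverage, [5,9) at half. */
    half_open_span_t spans[] = { {2, 255}, {5, 128}, {9, 0} };
    return print_rows (NULL, 10, 4, spans, 3);
}
```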
diff --git a/src/cairo-spans.c b/src/cairo-spans.c
index af3b85f..69894c1 100644
--- a/src/cairo-spans.c
+++ b/src/cairo-spans.c
@@ -275,13 +275,15 @@ _cairo_scan_converter_create_in_error (cairo_status_t status)
 }

 static cairo_status_t
-_cairo_nil_span_renderer_render_row (
+_cairo_nil_span_renderer_render_rows (
     void *abstract_renderer,
     int y,
+    int height,
     const cairo_half_open_span_t *coverages,
     unsigned num_coverages)
 {
     (void) y;
+    (void) height;
     (void) coverages;
     (void) num_coverages;
     return _cairo_span_renderer_status (abstract_renderer);
@@ -310,7 +312,7 @@ _cairo_span_renderer_set_error (
	ASSERT_NOT_REACHED;
     }
     if (renderer->status == CAIRO_STATUS_SUCCESS) {
-	renderer->render_row = _cairo_nil_span_renderer_render_row;
+	renderer->render_rows = _cairo_nil_span_renderer_render_rows;
	renderer->finish = _cairo_nil_span_renderer_finish;
	renderer->status = error;
     }
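The nil renderer is cairo's usual error-object pattern: once an error is recorded, the vtable entries are swapped for stubs that merely report the sticky status, so callers never branch on failure at each call site. A reduced sketch of the pattern (names hypothetical):

```c
#include <stdio.h>

typedef struct renderer renderer_t;
struct renderer {
    int status;			/* 0 on success */
    int (*render_rows) (renderer_t *r, int y, int height);
};

static int render_rows_ok (renderer_t *r, int y, int height)
{
    printf ("rendering rows %d..%d\n", y, y + height - 1);
    return r->status;
}

/* Stub installed on error: does nothing but return the sticky status. */
static int render_rows_nil (renderer_t *r, int y, int height)
{
    (void) y; (void) height;
    return r->status;
}

static void set_error (renderer_t *r, int error)
{
    if (r->status == 0) {
	r->render_rows = render_rows_nil;
	r->status = error;
    }
}

int main (void)
{
    renderer_t r = { 0, render_rows_ok };
    r.render_rows (&r, 0, 2);
    set_error (&r, -1);
    return r.render_rows (&r, 2, 2) == -1 ? 0 : 1;
}
```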
diff --git a/src/cairo-tor-scan-converter.c b/src/cairo-tor-scan-converter.c
index 29262c2..2b9fb1b 100644
--- a/src/cairo-tor-scan-converter.c
+++ b/src/cairo-tor-scan-converter.c
@@ -128,27 +128,29 @@ blit_with_span_renderer(
     cairo_span_renderer_t *span_renderer,
     struct pool *span_pool,
     int y,
+    int height,
     int xmin,
     int xmax);

 static glitter_status_t
-blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y);
+blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y, int height);

 #define GLITTER_BLIT_COVERAGES_ARGS \
	cairo_span_renderer_t *span_renderer, \
	struct pool *span_pool

-#define GLITTER_BLIT_COVERAGES(cells, y, xmin, xmax) do { \
+#define GLITTER_BLIT_COVERAGES(cells, y, height, xmin, xmax) do { \
     cairo_status_t status = blit_with_span_renderer (cells, \
						      span_renderer, \
						      span_pool, \
-						      y, xmin, xmax); \
+						      y, height, \
+						      xmin, xmax); \
     if (unlikely (status)) \
	return status; \
 } while (0)

-#define GLITTER_BLIT_COVERAGES_EMPTY(y, xmin, xmax) do { \
-    cairo_status_t status = blit_empty_with_span_renderer (span_renderer, y); \
+#define GLITTER_BLIT_COVERAGES_EMPTY(y, height, xmin, xmax) do { \
+    cairo_status_t status = blit_empty_with_span_renderer (span_renderer, y, height); \
     if (unlikely (status)) \
	return status; \
 } while (0)
@@ -309,8 +311,8 @@ typedef int grid_area_t;
 #define UNROLL3(x) x x x

 struct quorem {
-    int quo;
-    int rem;
+    int32_t quo;
+    int32_t rem;
 };

 /* Header for a chunk of memory in a memory pool. */
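Widening `quo`/`rem` to `int32_t` pins down the width of the quotient/remainder pair that drives the converter's exact integer DDA. A standalone sketch of the idea, with illustrative numbers and a simplified carry convention (the patch itself biases `rem` into [-dy, 0)):

```c
#include <stdio.h>
#include <stdint.h>

struct quorem { int32_t quo; int32_t rem; };

int main (void)
{
    const int32_t dx = 7, dy = 3;	/* edge slope: dx/dy per row */
    struct quorem x = { 10, 0 };	/* x starts exactly at 10 */
    struct quorem dxdy = { dx / dy, dx % dy };	/* 2 + 1/3 per row */

    for (int row = 0; row < 3; row++) {
	x.quo += dxdy.quo;
	x.rem += dxdy.rem;
	if (x.rem >= dy) {		/* carry the fractional overflow */
	    x.quo++;
	    x.rem -= dy;
	}
	printf ("row %d: x = %d + %d/%d\n", row + 1, x.quo, x.rem, dy);
    }
    /* After 3 rows x has advanced by exactly dx: 10 + 7 = 17. */
    return !(x.quo == 17 && x.rem == 0);
}
```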
@@ -382,6 +384,7 @@ struct edge {
     /* Original sign of the edge: +1 for downwards, -1 for upwards
      * edges. */
     int dir;
+    int vertical;
 };

 /* Number of subsample rows per y-bucket. Must be GRID_Y. */
@@ -389,18 +392,28 @@ struct edge {

 #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/EDGE_Y_BUCKET_HEIGHT)

+struct bucket {
+    /* Unsorted list of edges starting within this bucket. */
+    struct edge *edges;
+
+    /* Set to non-zero if there are edges starting strictly within the
+     * bucket. */
+    unsigned have_inside_edges;
+};
+
 /* A collection of sorted and vertically clipped edges of the polygon.
  * Edges are moved from the polygon to an active list while scan
  * converting. */
 struct polygon {
-    /* The vertical clip extents. */
+    /* The clip extents. */
+    grid_scaled_x_t xmin, xmax;
     grid_scaled_y_t ymin, ymax;

     /* Array of edges all starting in the same bucket. An edge is put
      * into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when
      * it is added to the polygon. */
-    struct edge **y_buckets;
-    struct edge *y_buckets_embedded[64];
+    struct bucket *y_buckets;
+    struct bucket y_buckets_embedded[64];

     struct {
	struct pool base[1];
@@ -702,7 +715,6 @@ static void
 cell_list_fini(struct cell_list *cells)
 {
     pool_fini (cells->cell_pool.base);
-    cell_list_init (cells);
 }

 /* Empty the cell list. This is called at the start of every pixel
@@ -715,6 +727,26 @@
 cell_list_reset (struct cell_list *cells)
 {
     pool_reset (cells->cell_pool.base);
 }

+static struct cell *
+cell_list_alloc (struct cell_list *cells,
+		 struct cell **cursor,
+		 struct cell *tail,
+		 int x)
+{
+    struct cell *cell;
+
+    cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
+    if (unlikely (NULL == cell))
+	return NULL;
+
+    *cursor = cell;
+    cell->next = tail;
+    cell->x = x;
+    cell->uncovered_area = 0;
+    cell->covered_height = 0;
+    return cell;
+}
+
 /* Find a cell at the given x-coordinate. Returns %NULL if a new cell
  * needed to be allocated but couldn't be. Cells must be found with
  * non-decreasing x-coordinate until the cell list is rewound using
@@ -737,22 +769,10 @@ cell_list_find (struct cell_list *cells, int x)
     }
     cells->cursor = cursor;

-    if (tail->x == x) {
+    if (tail->x == x)
	return tail;
-    } else {
-	struct cell *cell;
-
-	cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
-	if (unlikely (NULL == cell))
-	    return NULL;

-	*cursor = cell;
-	cell->next = tail;
-	cell->x = x;
-	cell->uncovered_area = 0;
-	cell->covered_height = 0;
-	return cell;
-    }
+    return cell_list_alloc (cells, cursor, tail, x);
 }

 /* Find two cells at x1 and x2. This is exactly equivalent
@@ -832,9 +852,8 @@ cell_list_find_pair(struct cell_list *cells, int x1, int x2)
 /* Add an unbounded subpixel span covering subpixels >= x to the
  * coverage cells. */
 static glitter_status_t
-cell_list_add_unbounded_subspan(
-    struct cell_list *cells,
-    grid_scaled_x_t x)
+cell_list_add_unbounded_subspan (struct cell_list *cells,
+				 grid_scaled_x_t x)
 {
     struct cell *cell;
     int ix, fx;
@@ -907,20 +926,24 @@ cell_list_render_edge(
     struct edge *edge,
     int sign)
 {
-    struct quorem x1 = edge->x;
-    struct quorem x2 = x1;
     grid_scaled_y_t y1, y2, dy;
     grid_scaled_x_t dx;
     int ix1, ix2;
     grid_scaled_x_t fx1, fx2;

-    x2.quo += edge->dxdy_full.quo;
-    x2.rem += edge->dxdy_full.rem;
-    if (x2.rem >= 0) {
-	++x2.quo;
-	x2.rem -= edge->dy;
+    struct quorem x1 = edge->x;
+    struct quorem x2 = x1;
+
+    if (! edge->vertical) {
+	x2.quo += edge->dxdy_full.quo;
+	x2.rem += edge->dxdy_full.rem;
+	if (x2.rem >= 0) {
+	    ++x2.quo;
+	    x2.rem -= edge->dy;
+	}
+
+	edge->x = x2;
     }
-    edge->x = x2;

     GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
     GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
@@ -1026,6 +1049,7 @@ static void
 polygon_init (struct polygon *polygon)
 {
     polygon->ymin = polygon->ymax = 0;
+    polygon->xmin = polygon->xmax = 0;
     polygon->y_buckets = polygon->y_buckets_embedded;
     pool_init (polygon->edge_pool.base,
	       8192 - sizeof (struct _pool_chunk),
@@ -1045,10 +1069,11 @@ polygon_fini (struct polygon *polygon)
 * receive new edges and clip them to the vertical range
 * [ymin,ymax). */
 static glitter_status_t
-polygon_reset(
-    struct polygon *polygon,
-    grid_scaled_y_t ymin,
-    grid_scaled_y_t ymax)
+polygon_reset (struct polygon *polygon,
+	       grid_scaled_x_t xmin,
+	       grid_scaled_x_t xmax,
+	       grid_scaled_y_t ymin,
+	       grid_scaled_y_t ymax)
 {
     unsigned h = ymax - ymin;
     unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + EDGE_Y_BUCKET_HEIGHT-1,
@@ -1065,14 +1090,16 @@ polygon_reset(
     polygon->y_buckets = polygon->y_buckets_embedded;
     if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
	polygon->y_buckets = _cairo_malloc_ab (num_buckets,
-					       sizeof (struct edge *));
+					       sizeof (struct bucket));
	if (unlikely (NULL == polygon->y_buckets))
	    goto bail_no_mem;
     }
-    memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
+    memset (polygon->y_buckets, 0, num_buckets * sizeof (struct bucket));

     polygon->ymin = ymin;
     polygon->ymax = ymax;
+    polygon->xmin = xmin;
+    polygon->xmax = xmax;
     return GLITTER_STATUS_SUCCESS;

 bail_no_mem:
@@ -1086,10 +1113,13 @@ _polygon_insert_edge_into_its_y_bucket(
     struct polygon *polygon,
     struct edge *e)
 {
-    unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
-    struct edge **ptail = &polygon->y_buckets[ix];
+    unsigned j = e->ytop - polygon->ymin;
+    unsigned ix = j / EDGE_Y_BUCKET_HEIGHT;
+    unsigned offset = j % EDGE_Y_BUCKET_HEIGHT;
+    struct edge **ptail = &polygon->y_buckets[ix].edges;
     e->next = *ptail;
     *ptail = e;
+    polygon->y_buckets[ix].have_inside_edges |= offset;
 }

 inline static glitter_status_t
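The offset bookkeeping above is what later allows whole rows to be skipped: an edge starting mid-row leaves a non-zero offset, tainting its bucket so the render loop knows the full-row fast path is unsafe there. A toy sketch (GRID_Y illustrative):

```c
#include <stdio.h>

#define GRID_Y 15

struct bucket { int edge_count; unsigned have_inside_edges; };

static void insert_edge (struct bucket *buckets, int ytop, int ymin)
{
    unsigned j = ytop - ymin;
    unsigned ix = j / GRID_Y;		/* which pixel row's bucket */
    unsigned offset = j % GRID_Y;	/* subsample offset within it */

    buckets[ix].edge_count++;
    buckets[ix].have_inside_edges |= offset;
}

int main (void)
{
    struct bucket buckets[4] = {{0}};

    insert_edge (buckets, 0, 0);	  /* starts on a row boundary */
    insert_edge (buckets, GRID_Y + 7, 0); /* starts inside row 1 */

    for (int i = 0; i < 4; i++)
	printf ("row %d: %d edge(s), inside=%u\n",
		i, buckets[i].edge_count, buckets[i].have_inside_edges);
    return 0;
}
```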
@@ -1115,30 +1145,53 @@ polygon_add_edge (struct polygon *polygon,
     dx = edge->line.p2.x - edge->line.p1.x;
     dy = edge->line.p2.y - edge->line.p1.y;
     e->dy = dy;
-    e->dxdy = floored_divrem (dx, dy);
-
-    if (ymin <= edge->top)
-	ytop = edge->top;
-    else
-	ytop = ymin;
-    if (ytop == edge->line.p1.y) {
-	e->x.quo = edge->line.p1.x;
-	e->x.rem = 0;
-    } else {
-	e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
-	e->x.quo += edge->line.p1.x;
-    }
-
     e->dir = edge->dir;
+
+    ytop = edge->top >= ymin ? edge->top : ymin;
+    ybot = edge->bottom <= ymax ? edge->bottom : ymax;
     e->ytop = ytop;
-    ybot = edge->bottom < ymax ? edge->bottom : ymax;
     e->height_left = ybot - ytop;

-    if (e->height_left >= GRID_Y) {
-	e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
-    } else {
+    if (dx == 0) {
+	e->vertical = TRUE;
+	e->x.quo = edge->line.p1.x;
+	e->x.rem = 0;
+	e->dxdy.quo = 0;
+	e->dxdy.rem = 0;
	e->dxdy_full.quo = 0;
	e->dxdy_full.rem = 0;
+
+	/* Drop edges to the right of the clip extents. */
+	if (e->x.quo >= polygon->xmax)
+	    return GLITTER_STATUS_SUCCESS;
+
+	/* Offset vertical edges at the left side of the clip extents
+	 * to just shy of the left side. We depend on this when
+	 * checking for possible intersections within the clip
+	 * rectangle. */
+	if (e->x.quo <= polygon->xmin) {
+	    e->x.quo = polygon->xmin - 1;
+	}
+    } else {
+	e->vertical = FALSE;
+	e->dxdy = floored_divrem (dx, dy);
+	if (ytop == edge->line.p1.y) {
+	    e->x.quo = edge->line.p1.x;
+	    e->x.rem = 0;
+	} else {
+	    e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
+	    e->x.quo += edge->line.p1.x;
+	}
+
+	if (e->x.quo >= polygon->xmax && e->dxdy.quo >= 0)
+	    return GLITTER_STATUS_SUCCESS;
+
+	if (e->height_left >= GRID_Y) {
+	    e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
+	} else {
+	    e->dxdy_full.quo = 0;
+	    e->dxdy_full.rem = 0;
+	}
     }

     _polygon_insert_edge_into_its_y_bucket (polygon, e);
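Worth spelling out the vertical-edge handling: a vertical edge never moves in x, so one right of xmax can be dropped outright, and one at or left of xmin is nudged to xmin - 1, pushing any crossings it causes outside the clip where active_list_can_step_full_row() may ignore them. A simplified sketch of the classification (plain ints instead of grid fixed-point):

```c
#include <stdio.h>

struct edge { int vertical; int x; };

static int add_vertical_edge (struct edge *e, int x, int xmin, int xmax)
{
    e->vertical = 1;
    e->x = x;

    if (e->x >= xmax)
	return 0;		/* drop: contributes nothing inside clip */

    if (e->x <= xmin)
	e->x = xmin - 1;	/* nudge just shy of the left clip edge */

    return 1;			/* keep */
}

int main (void)
{
    struct edge e;
    printf ("x=50: keep=%d\n", add_vertical_edge (&e, 50, 0, 40));
    printf ("x=-5: keep=%d x=%d\n", add_vertical_edge (&e, -5, 0, 40), e.x);
    return 0;
}
```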
@@ -1161,31 +1214,30 @@ active_list_init(struct active_list *active)
     active_list_reset(active);
 }

-static void
-active_list_fini(
-    struct active_list *active)
-{
-    active_list_reset(active);
-}
-
 /* Merge the edges in an unsorted list of edges into a sorted
 * list. The sort order is edges ascending by edge->x.quo. Returns
 * the new head of the sorted list. */
 static struct edge *
 merge_unsorted_edges(struct edge *sorted_head, struct edge *unsorted_head)
 {
-    struct edge *head = unsorted_head;
     struct edge **cursor = &sorted_head;
     int x;

-    while (NULL != head) {
+    if (sorted_head == NULL) {
+	sorted_head = unsorted_head;
+	unsorted_head = unsorted_head->next;
+	sorted_head->next = NULL;
+	if (unsorted_head == NULL)
+	    return sorted_head;
+    }
+
+    do {
+	struct edge *next = unsorted_head->next;
	struct edge *prev = *cursor;
-	struct edge *next = head->next;
-	x = head->x.quo;

-	if (NULL == prev || x < prev->x.quo) {
+	x = unsorted_head->x.quo;
+	if (x < prev->x.quo)
	    cursor = &sorted_head;
-	}

	while (1) {
	    UNROLL3({
@@ -1196,26 +1248,29 @@ merge_unsorted_edges(struct edge *sorted_head, struct edge *unsorted_head)
	    });
	}

-	head->next = *cursor;
-	*cursor = head;
+	unsorted_head->next = *cursor;
+	*cursor = unsorted_head;
+	unsorted_head = next;
+    } while (unsorted_head != NULL);

-	head = next;
-    }
     return sorted_head;
 }

 /* Test if the edges on the active list can be safely advanced by a
 * full row without intersections or any edges ending. */
 inline static int
-active_list_can_step_full_row(
-    struct active_list *active)
+active_list_can_step_full_row (struct active_list *active,
+			       grid_scaled_x_t xmin)
 {
+    const struct edge *e;
+    grid_scaled_x_t prev_x = INT_MIN;
+
     /* Recomputes the minimum height of all edges on the active
     * list if we have been dropping edges. */
     if (active->min_height <= 0) {
-	struct edge *e = active->head;
	int min_height = INT_MAX;

+	e = active->head;
	while (NULL != e) {
	    if (e->height_left < min_height)
		min_height = e->height_left;
@@ -1225,27 +1280,38 @@ active_list_can_step_full_row(
	active->min_height = min_height;
     }

-    /* Check for intersections only if no edges end during the next
-     * row. */
-    if (active->min_height >= GRID_Y) {
-	grid_scaled_x_t prev_x = INT_MIN;
-	struct edge *e = active->head;
-	while (NULL != e) {
-	    struct quorem x = e->x;
+    if (active->min_height < GRID_Y)
+	return 0;

+    /* Check for intersections as no edges end during the next row. */
+    e = active->head;
+    while (NULL != e) {
+	struct quorem x = e->x;
+
+	if (! e->vertical) {
	    x.quo += e->dxdy_full.quo;
	    x.rem += e->dxdy_full.rem;
	    if (x.rem >= 0)
		++x.quo;
+	}

-	    if (x.quo <= prev_x)
+	/* There may be an intersection if the edge sort order might
+	 * change. */
+	if (x.quo <= prev_x) {
+	    /* Ignore intersections to the left of the clip extents.
+	     * This assumes that all vertical edges on or at the left
+	     * side of the clip rectangle have been shifted slightly
+	     * to the left in polygon_add_edge(). */
+	    if (prev_x >= xmin || x.quo >= xmin || e->x.quo >= xmin)
		return 0;
+	}
+	else {
	    prev_x = x.quo;
-	    e = e->next;
	}
-	return 1;
+	e = e->next;
     }
-    return 0;
+
+    return 1;
 }

 /* Merges edges on the given subpixel row from the polygon to the
@@ -1261,7 +1327,7 @@ active_list_merge_edges_from_polygon(
     unsigned ix = EDGE_Y_BUCKET_INDEX(y, polygon->ymin);
     int min_height = active->min_height;
     struct edge *subrow_edges = NULL;
-    struct edge **ptail = &polygon->y_buckets[ix];
+    struct edge **ptail = &polygon->y_buckets[ix].edges;

     while (1) {
	struct edge *tail = *ptail;
@@ -1277,8 +1343,10 @@ active_list_merge_edges_from_polygon(
	    ptail = &tail->next;
	}
     }
-    active->head = merge_unsorted_edges(active->head, subrow_edges);
-    active->min_height = min_height;
+    if (subrow_edges) {
+	active->head = merge_unsorted_edges(active->head, subrow_edges);
+	active->min_height = min_height;
+    }
 }

 /* Advance the edges on the active list by one subsample row by
@@ -1439,11 +1507,13 @@ apply_nonzero_fill_rule_and_step_edges (struct active_list *active,
	    }
	}

-	right_edge->x.quo += right_edge->dxdy_full.quo;
-	right_edge->x.rem += right_edge->dxdy_full.rem;
-	if (right_edge->x.rem >= 0) {
-	    ++right_edge->x.quo;
-	    right_edge->x.rem -= right_edge->dy;
+	if (! right_edge->vertical) {
+	    right_edge->x.quo += right_edge->dxdy_full.quo;
+	    right_edge->x.rem += right_edge->dxdy_full.rem;
+	    if (right_edge->x.rem >= 0) {
+		++right_edge->x.quo;
+		right_edge->x.rem -= right_edge->dy;
+	    }
	}
     }

@@ -1472,6 +1542,7 @@ apply_evenodd_fill_rule_and_step_edges (struct active_list *active,
     left_edge = *cursor;
     while (NULL != left_edge) {
	struct edge *right_edge;
+	int winding = left_edge->dir;

	left_edge->height_left -= GRID_Y;
	if (left_edge->height_left)
@@ -1490,17 +1561,22 @@ apply_evenodd_fill_rule_and_step_edges (struct active_list *active,
	    else
		*cursor = right_edge->next;

+	    winding += right_edge->dir;
+	    if ((winding & 1) == 0) {
	    if (right_edge->next == NULL ||
		right_edge->next->x.quo != right_edge->x.quo)
	    {
		break;
	    }
+	    }

-	    right_edge->x.quo += right_edge->dxdy_full.quo;
-	    right_edge->x.rem += right_edge->dxdy_full.rem;
-	    if (right_edge->x.rem >= 0) {
-		++right_edge->x.quo;
-		right_edge->x.rem -= right_edge->dy;
+	    if (! right_edge->vertical) {
+		right_edge->x.quo += right_edge->dxdy_full.quo;
+		right_edge->x.rem += right_edge->dxdy_full.rem;
+		if (right_edge->x.rem >= 0) {
+		    ++right_edge->x.quo;
+		    right_edge->x.rem -= right_edge->dy;
+		}
	    }
	}

@@ -1537,8 +1613,14 @@ blit_span(
     }
 }

-#define GLITTER_BLIT_COVERAGES(coverages, y, xmin, xmax) \
-    blit_cells(coverages, raster_pixels + (y)*raster_stride, xmin, xmax)
+#define GLITTER_BLIT_COVERAGES(coverages, y, height, xmin, xmax) \
+    do { \
+	int __y = y; \
+	int __h = height; \
+	do { \
+	    blit_cells(coverages, raster_pixels + (__y++)*raster_stride, xmin, xmax); \
+	} while (--__h); \
+    } while (0)

 static void
 blit_cells(
@@ -1597,7 +1679,6 @@ static void
 _glitter_scan_converter_fini(glitter_scan_converter_t *converter)
 {
     polygon_fini(converter->polygon);
-    active_list_fini(converter->active);
     cell_list_fini(converter->coverages);
     converter->xmin=0;
     converter->ymin=0;
@@ -1641,7 +1722,7 @@ glitter_scan_converter_reset(

     active_list_reset(converter->active);
     cell_list_reset(converter->coverages);
-    status = polygon_reset(converter->polygon, ymin, ymax);
+    status = polygon_reset(converter->polygon, xmin, xmax, ymin, ymax);
     if (status)
	return status;

@@ -1711,19 +1792,48 @@ glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
 #endif

 #ifndef GLITTER_BLIT_COVERAGES_EMPTY
-# define GLITTER_BLIT_COVERAGES_EMPTY(y, xmin, xmax)
+# define GLITTER_BLIT_COVERAGES_EMPTY(y0, y1, xmin, xmax)
 #endif

+static cairo_bool_t
+active_list_is_vertical (struct active_list *active)
+{
+    struct edge *e;
+
+    for (e = active->head; e != NULL; e = e->next) {
+	if (! e->vertical)
+	    return FALSE;
+    }
+
+    return TRUE;
+}
+
+static void
+step_edges (struct active_list *active, int count)
+{
+    struct edge **cursor = &active->head;
+    struct edge *edge;
+
+    for (edge = *cursor; edge != NULL; edge = *cursor) {
+	edge->height_left -= GRID_Y * count;
+	if (edge->height_left)
+	    cursor = &edge->next;
+	else
+	    *cursor = edge->next;
+    }
+}
+
 I glitter_status_t
 glitter_scan_converter_render(
     glitter_scan_converter_t *converter,
     int nonzero_fill,
     GLITTER_BLIT_COVERAGES_ARGS)
 {
-    int i;
+    int i, j;
     int ymax_i = converter->ymax / GRID_Y;
     int ymin_i = converter->ymin / GRID_Y;
     int xmin_i, xmax_i;
+    grid_scaled_x_t xmin = converter->xmin;
     int h = ymax_i - ymin_i;
     struct polygon *polygon = converter->polygon;
     struct cell_list *coverages = converter->coverages;
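step_edges() and active_list_is_vertical() enable the new bulk stepping: when every active edge is vertical, one row's coverage repeats for the following rows, so the render loop can extend j and retire height_left in a single subtraction per edge. A standalone sketch (GRID_Y illustrative; the real code additionally guards the skip with min_height):

```c
#include <stdio.h>

#define GRID_Y 15

struct edge { struct edge *next; int height_left; };

static void step_edges (struct edge **head, int count)
{
    struct edge **cursor = head;
    struct edge *edge;

    for (edge = *cursor; edge != NULL; edge = *cursor) {
	edge->height_left -= GRID_Y * count;
	if (edge->height_left > 0)
	    cursor = &edge->next;
	else
	    *cursor = edge->next;	/* unlink exhausted edges */
    }
}

int main (void)
{
    struct edge b = { NULL, GRID_Y };	/* ends after one more row */
    struct edge a = { &b, 5 * GRID_Y };	/* five rows left */
    struct edge *head = &a;

    step_edges (&head, 2);		/* skip two identical rows */
    printf ("head is %s, height_left=%d\n",
	    head == &a ? "a" : "not a", head ? head->height_left : 0);
    return 0;
}
```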
@@ -1738,22 +1848,28 @@ glitter_scan_converter_render(
     GLITTER_BLIT_COVERAGES_BEGIN;

     /* Render each pixel row. */
-    for (i=0; i<h; i++) {
+    for (i = 0; i < h; i = j) {
	int do_full_step = 0;
	glitter_status_t status = 0;

+	j = i + 1;
+
	/* Determine if we can ignore this row or use the full pixel
	 * stepper. */
-	if (GRID_Y == EDGE_Y_BUCKET_HEIGHT && ! polygon->y_buckets[i]) {
+	if (polygon->y_buckets[i].edges == NULL) {
	    if (! active->head) {
-		GLITTER_BLIT_COVERAGES_EMPTY (i+ymin_i, xmin_i, xmax_i);
+		for (; j < h && ! polygon->y_buckets[j].edges; j++)
+		    ;
+		GLITTER_BLIT_COVERAGES_EMPTY (i+ymin_i, j-i, xmin_i, xmax_i);
		continue;
	    }
-
-	    do_full_step = active_list_can_step_full_row (active);
+	    do_full_step = active_list_can_step_full_row (active, xmin);
+	}
+	else if (! polygon->y_buckets[i].have_inside_edges) {
+	    grid_scaled_y_t y = (i+ymin_i)*GRID_Y;
+	    active_list_merge_edges_from_polygon (active, y, polygon);
+	    do_full_step = active_list_can_step_full_row (active, xmin);
	}
-
-	cell_list_reset (coverages);

	if (do_full_step) {
	    /* Step by a full pixel row's worth. */
@@ -1764,8 +1880,20 @@ glitter_scan_converter_render(
		status = apply_evenodd_fill_rule_and_step_edges (active,
								 coverages);
	    }
+
+	    if (active_list_is_vertical (active)) {
+		while (j < h &&
+		       polygon->y_buckets[j].edges == NULL &&
+		       active->min_height >= 2*GRID_Y)
+		{
+		    active->min_height -= GRID_Y;
+		    j++;
+		}
+		if (j != i + 1)
+		    step_edges (active, j - (i + 1));
+	    }
	} else {
-	    /* Subsample this row. */
+	    /* Supersample this row. */
	    grid_scaled_y_t suby;
	    for (suby = 0; suby < GRID_Y; suby++) {
		grid_scaled_y_t y = (i+ymin_i)*GRID_Y + suby;
@@ -1787,13 +1915,13 @@ glitter_scan_converter_render(
	if (unlikely (status))
	    return status;

-	GLITTER_BLIT_COVERAGES(coverages, i+ymin_i, xmin_i, xmax_i);
+	GLITTER_BLIT_COVERAGES(coverages, i+ymin_i, j-i, xmin_i, xmax_i);
+	cell_list_reset (coverages);

-	if (! active->head) {
+	if (! active->head)
	    active->min_height = INT_MAX;
-	} else {
+	else
	    active->min_height -= GRID_Y;
-	}
     }

     /* Clean up the coverage blitter. */
@@ -1807,21 +1935,20 @@ glitter_scan_converter_render(
 * scan converter subclass. */

 static glitter_status_t
-blit_with_span_renderer(
-    struct cell_list *cells,
-    cairo_span_renderer_t *renderer,
-    struct pool *span_pool,
-    int y,
-    int xmin,
-    int xmax)
+blit_with_span_renderer (struct cell_list *cells,
+			 cairo_span_renderer_t *renderer,
+			 struct pool *span_pool,
+			 int y, int height,
+			 int xmin, int xmax)
 {
     struct cell *cell = cells->head;
     int prev_x = xmin;
     int cover = 0;
     cairo_half_open_span_t *spans;
     unsigned num_spans;
+
     if (cell == NULL)
-	return CAIRO_STATUS_SUCCESS;
+	return blit_empty_with_span_renderer (renderer, y, height);

     /* Skip cells to the left of the clip region. */
     while (cell != NULL && cell->x < xmin) {
@@ -1833,12 +1960,12 @@ blit_with_span_renderer(
     /* Count number of cells remaining. */
     {
	struct cell *next = cell;
-	num_spans = 0;
-	while (next) {
+	num_spans = 1;
+	while (next != NULL) {
	    next = next->next;
	    ++num_spans;
	}
-	num_spans = 2*num_spans + 1;
+	num_spans = 2*num_spans;
     }

     /* Allocate enough spans for the row. */
@@ -1853,6 +1980,7 @@ blit_with_span_renderer(
     for (; cell != NULL; cell = cell->next) {
	int x = cell->x;
	int area;
+
	if (x >= xmax)
	    break;

@@ -1872,20 +2000,26 @@ blit_with_span_renderer(
	prev_x = x+1;
     }

-    if (prev_x < xmax) {
+    if (prev_x <= xmax) {
	spans[num_spans].x = prev_x;
	spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
	++num_spans;
     }

+    if (prev_x < xmax && cover) {
+	spans[num_spans].x = xmax;
+	spans[num_spans].coverage = 0;
+	++num_spans;
+    }
+
     /* Dump them into the renderer. */
-    return renderer->render_row (renderer, y, spans, num_spans);
+    return renderer->render_rows (renderer, y, height, spans, num_spans);
 }

 static glitter_status_t
-blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y)
+blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y, int height)
 {
-    return renderer->render_row (renderer, y, NULL, 0);
+    return renderer->render_rows (renderer, y, height, NULL, 0);
 }

 struct _cairo_tor_scan_converter {
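As background for the blit changes: the cell list stores per-pixel cover/area deltas, and each cell expands to at most two half-open spans (one for its own pixel, one for the run that follows), which is the reason for the 2*num_spans allocation above. A sketch with illustrative grid constants and a stand-in for GRID_AREA_TO_ALPHA:

```c
#include <stdio.h>

/* Stand-in for GRID_AREA_TO_ALPHA: map a subpixel area to 0..255. */
#define GRID_MAX_AREA (2 * 16 * 15)	/* illustrative grid: 16 x 15 */
#define AREA_TO_ALPHA(a) ((a) * 255 / GRID_MAX_AREA)

struct cell { struct cell *next; int x; int covered_height; int uncovered_area; };

int main (void)
{
    /* An edge entering at x=3 (half-covered pixel) and leaving at x=7. */
    struct cell c2 = { NULL, 7, -15, 0 };
    struct cell c1 = { &c2, 3, 15, 240 };
    int cover = 0;

    for (struct cell *cell = &c1; cell != NULL; cell = cell->next) {
	cover += cell->covered_height * 16 * 2;
	int area = cover - cell->uncovered_area;

	/* Span covering just this cell's pixel... */
	printf ("span [%d,%d): alpha=%d\n", cell->x, cell->x + 1,
		AREA_TO_ALPHA (area));
	/* ...and the running cover until the next cell. */
	printf ("span [%d,..): alpha=%d\n", cell->x + 1,
		AREA_TO_ALPHA (cover));
    }
    return 0;
}
```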
diff --git a/src/cairo-win32-surface.c b/src/cairo-win32-surface.c
index 82d1cf5..d4575a3 100644
--- a/src/cairo-win32-surface.c
+++ b/src/cairo-win32-surface.c
@@ -1954,6 +1954,9 @@ typedef struct _cairo_win32_surface_span_renderer {
     const cairo_pattern_t *pattern;
     cairo_antialias_t antialias;

+    uint8_t *mask_data;
+    uint32_t mask_stride;
+
     cairo_image_surface_t *mask;
     cairo_win32_surface_t *dst;
     cairo_region_t *clip_region;
@@ -1962,14 +1965,16 @@ typedef struct _cairo_win32_surface_span_renderer {
 } cairo_win32_surface_span_renderer_t;

 static cairo_status_t
-_cairo_win32_surface_span_renderer_render_row (
+_cairo_win32_surface_span_renderer_render_rows (
     void *abstract_renderer,
     int y,
+    int height,
     const cairo_half_open_span_t *spans,
     unsigned num_spans)
 {
     cairo_win32_surface_span_renderer_t *renderer = abstract_renderer;
-    _cairo_image_surface_span_render_row (y, spans, num_spans, renderer->mask, &renderer->composite_rectangles);
+    while (height--)
+	_cairo_image_surface_span_render_row (y++, spans, num_spans, renderer->mask_data, renderer->mask_stride);
     return CAIRO_STATUS_SUCCESS;
 }

@@ -2066,8 +2071,7 @@ _cairo_win32_surface_create_span_renderer (cairo_operator_t op,

     renderer->base.destroy = _cairo_win32_surface_span_renderer_destroy;
     renderer->base.finish = _cairo_win32_surface_span_renderer_finish;
-    renderer->base.render_row =
-	_cairo_win32_surface_span_renderer_render_row;
+    renderer->base.render_rows = _cairo_win32_surface_span_renderer_render_rows;
     renderer->op = op;
     renderer->pattern = pattern;
     renderer->antialias = antialias;
@@ -2088,6 +2092,9 @@ _cairo_win32_surface_create_span_renderer (cairo_operator_t op,
	_cairo_win32_surface_span_renderer_destroy (renderer);
	return _cairo_span_renderer_create_in_error (status);
     }
+
+    renderer->mask_data = renderer->mask->data - rects->mask.x - rects->mask.y * renderer->mask->stride;
+    renderer->mask_stride = renderer->mask->stride;
     return &renderer->base;
 }

diff --git a/src/cairo-xlib-display.c b/src/cairo-xlib-display.c
index a7a40b8..566d9fb 100644
--- a/src/cairo-xlib-display.c
+++ b/src/cairo-xlib-display.c
@@ -407,6 +407,10 @@ _cairo_xlib_display_get (Display *dpy,
	display->buggy_pad_reflect = TRUE;
     }

+    /* gradients don't seem to work */
+    display->buggy_gradients = TRUE;
+
+
     /* XXX workaround; see https://bugzilla.mozilla.org/show_bug.cgi?id=413583 */
     /* If buggy_repeat_force == -1, then initialize.
      * - set to -2, meaning "nothing was specified", and we trust the above detection.
diff --git a/src/cairoint.h b/src/cairoint.h
index 58850ab..1cdf6ff 100644
--- a/src/cairoint.h
+++ b/src/cairoint.h
@@ -2257,8 +2257,8 @@ cairo_private void
 _cairo_image_surface_span_render_row (int y,
				      const cairo_half_open_span_t *spans,
				      unsigned num_spans,
-				      cairo_image_surface_t *mask,
-				      const cairo_composite_rectangles_t *rects);
+				      uint8_t *data,
+				      uint32_t stride);

 cairo_private cairo_image_transparency_t
 _cairo_image_analyze_transparency (cairo_image_surface_t *image);