Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic according to
private browsing mode or the privacy.thirdparty.isolate preference, and
implement it in GetCookieStringCommon and FindCookie, where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkScanPriv.h"
#include "SkPath.h"
#include "SkMatrix.h"
#include "SkBlitter.h"
#include "SkRegion.h"
#include "SkAntiRun.h"

#define SHIFT 2
#define SCALE (1 << SHIFT)
#define MASK (SCALE - 1)

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records a rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE

    Enabling SK_USE_LEGACY_AA_COVERAGE keeps the aa coverage calculations as
    they were before the fix that unified the output of the RLE and MASK
    supersamplers.
 */
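// Illustrative sketch (not in the original source): how the two coordinate
// systems above relate, assuming SHIFT == 2 so each destination pixel spans
// SCALE == 4 supersampled units per axis.
static inline void example_coordinate_spaces() {
    int dstX   = 13;             // destination space (the 'i'/'I' convention)
    int superX = dstX << SHIFT;  // 52: same position in supersampled space
    int frac   = superX & MASK;  // 0..MASK: sub-pixel offset within the pixel
    SkASSERT(dstX == (superX >> SHIFT));  // shifting back recovers the pixel
    (void)frac;
}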
//#define FORCE_SUPERMASK
//#define FORCE_RLE
//#define SK_USE_LEGACY_AA_COVERAGE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkRegion& clip) {
    fRealBlitter = realBlitter;
    /*
     *  We use the clip bounds instead of the ir, since we may be asked to
     *  draw outside of the rect if we're an inverse filltype
     */
    const int left = clip.getBounds().fLeft;
    const int right = clip.getBounds().fRight;

    fLeft = left;
    fSuperLeft = left << SHIFT;
    fWidth = right - left;
#if 0
    fCurrIY = -1;
    fCurrY = -1;
#else
    fTop = ir.fTop;
    fCurrIY = ir.fTop - 1;
    fCurrY = (ir.fTop << SHIFT) - 1;
#endif
    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                 const SkRegion& clip);

    virtual ~SuperBlitter() {
        this->flush();
        sk_free(fRuns.fRuns);
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE;

private:
    SkAlphaRuns fRuns;
    int fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                           const SkRegion& clip)
        : BaseSuperBlitter(realBlitter, ir, clip) {
    const int width = fWidth;

    // extra one to store the zero at the end
    fRuns.fRuns = (int16_t*)sk_malloc_throw((width + 1 + (width + 2)/2) * sizeof(int16_t));
    fRuns.fAlpha = (uint8_t*)(fRuns.fRuns + width + 1);
    fRuns.reset(width);

    fOffsetX = 0;
}
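// Layout note for the allocation above (illustrative, not in the original
// source): the single sk_malloc_throw block is shared by both arrays:
//
//   [ int16_t runs[width + 1] | uint8_t alpha[width + 1], rounded up ]
//
// The run array needs width + 1 entries for the terminating zero, and the
// trailing (width + 2) / 2 int16_t slots provide at least width + 1 bytes
// for the alpha values that fRuns.fAlpha points into.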
void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            fRuns.reset(fWidth);
            fOffsetX = 0;
        }
        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}
/** coverage_to_partial_alpha() is used by SkAlphaRuns, which *accumulates*
    SCALE pixels worth of "alpha" in [0, 256/SCALE] to produce a final value
    in [0, 255], and handles the 256->255 clamping itself, with the same
    (alpha - (alpha >> 8)) correction as coverage_to_exact_alpha().
 */
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
#ifdef SK_USE_LEGACY_AA_COVERAGE
    aa -= aa >> (8 - SHIFT - 1);
#endif
    return aa;
}

/** coverage_to_exact_alpha() is used by our blitter, which wants a final
    value in [0, 255].
 */
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
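// Worked example (illustrative, not in the original source), with SHIFT == 2
// so a coverage value aa lies in [0, SCALE] = [0, 4]:
//   coverage_to_exact_alpha(4)   == 256 - (256 >> 8) == 255  (full coverage)
//   coverage_to_exact_alpha(2)   == 128                      (half coverage)
//   coverage_to_partial_alpha(4) == 4 << 4 == 64 == 256 / SCALE, so SCALE
// supersampled rows accumulated by SkAlphaRuns sum to at most 256, which the
// runs then clamp back down to 255.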
void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }
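    // Worked example (illustrative, not in the original source), SHIFT == 2:
    // a span from super x = 3 to x = 13 gives fb = 3, fe = 1, n = 2. After
    // the fb = SCALE - fb fixup, that is 1 covered supersample in the first
    // pixel, two fully covered middle pixels, and 1 covered supersample in
    // the last pixel: 1 + 2*4 + 1 == 10, the span's width.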
    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
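    // Worked example (illustrative, not in the original source), SHIFT == 2:
    // for y = 5, height = 11 the loop above emits three leading supersampled
    // rows (y = 5, 6, 7) through blitH(); the remainder starts at y = 8 with
    // height = 8, which the block below handles as two full destination rows
    // (start_y = 2, stop_y = 4), leaving no trailing rows for the final loop.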
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }
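        // Worked example (illustrative, not in the original source), SHIFT == 2:
        // x = 5, width = 10 gives ileft = 1, xleft = 1, irite = 3, xrite = 3.
        // The left column then covers SCALE - 1 = 3 supersamples, there is
        // n = 1 fully opaque middle column, and the right column covers 3
        // supersamples: 3 + 4 + 3 == 10, the rect's supersampled width.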
        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            xrite = 0;
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);
    virtual ~MaskSuperBlitter() {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    virtual void blitH(int x, int y, int width) SK_OVERRIDE;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask fMask;
    SkIRect fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkRegion& clip)
        : BaseSuperBlitter(realBlitter, ir, clip) {
    SkASSERT(CanHandleRect(ir));

    fMask.fImage = (uint8_t*)fStorage;
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    fClipRect = ir;
    fClipRect.intersect(clip.getBounds());

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /*  I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
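// For example (illustrative, not in the original source):
// quadplicate_byte(0x3F) == 0x3F3F3F3F, which lets the quad loop in
// add_aa_span() below add maxValue to four alpha bytes in one 32-bit add.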
// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}
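// Worked example (illustrative, not in the original source): if *ptr == 192
// and add == 64, tmp == 256, and tmp >> 8 == 1, so 255 is stored instead of
// wrapping to 0; for any tmp <= 255 the correction term is zero and the sum
// is stored unchanged.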
// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*>(qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }
    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra
    // space (see the + 1 comment in fStorage[]).
    saturated_add(alpha, stopAlpha);
}
void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= (fMask.fBounds.fLeft << SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
    const SkScalar min = -max;
    return  r.fLeft > min && r.fTop > min &&
            r.fRight < max && r.fBottom < max;
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (value << s >> s) - value;
}
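// Illustrative note (not in the original source): with s = 16 + shift, the
// round trip (value << s) >> s sign-extends from the low 16 - shift bits, so
// the difference is zero exactly when value << shift fits in a signed short.
// With shift == 2: 8191 << 2 == 32764 fits, but 8192 << 2 == 32768 does not,
// matching the SkASSERTs in rect_overflows_short_shift() below.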
/**
    Would any of the coordinates of this rectangle not fit in a short,
    when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, SHIFT));
    SkASSERT(overflows_short_shift(8192, SHIFT));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, SHIFT) |
           overflows_short_shift(rect.fRight, SHIFT) |
           overflows_short_shift(rect.fTop, SHIFT) |
           overflows_short_shift(rect.fBottom, SHIFT);
}

static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
    const SkScalar maxScalar = SkIntToScalar(maxInt);

    if (fitsInsideLimit(src, maxScalar)) {
        src.roundOut(dst);
        return true;
    }
    return false;
}
void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    SkIRect ir;

    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect [%g %g %g %g]\n",
                 r.fLeft, r.fTop, r.fRight, r.fBottom);
#endif
        return;
    }
    if (ir.isEmpty()) {
        if (path.isInverseFillType()) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (path.isInverseFillType()) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            return;
        }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }
    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (The runs[] uses int16_t for its index.)
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds; see overflows_short_shift) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);
    const SkIRect* clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == NULL) {  // clipped out
        if (path.isInverseFillType()) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (path.isInverseFillType()) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = NULL;

    if (clipRect) {
        superRect.set(clipRect->fLeft << SHIFT, clipRect->fTop << SHIFT,
                      clipRect->fRight << SHIFT, clipRect->fBottom << SHIFT);
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!path.isInverseFillType() && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, *clipRgn);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    } else {
        SuperBlitter superBlit(blitter, ir, *clipRgn);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    }

    if (path.isInverseFillType()) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}
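// Minimal usage sketch (illustrative, not in the original source): antialias
// a path through a plain SkRegion clip. Any SkBlitter bound to the target
// surface would do; the name example_anti_fill is an assumption.
static inline void example_anti_fill(const SkPath& path, const SkRegion& clip,
                                     SkBlitter* blitter) {
    // forceRLE == false lets small, narrow paths take the MaskSuperBlitter
    // fast path; pass true to force the RLE SuperBlitter.
    SkScan::AntiFillPath(path, clip, blitter, false);
}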
///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
                      SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
    }
}