--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/gfx/skia/trunk/src/core/SkScan_AntiPath.cpp Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,741 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "SkScanPriv.h"
+#include "SkPath.h"
+#include "SkMatrix.h"
+#include "SkBlitter.h"
+#include "SkRegion.h"
+#include "SkAntiRun.h"
+
+#define SHIFT   2
+#define SCALE   (1 << SHIFT)
+#define MASK    (SCALE - 1)
+
+/** @file
+    We have two techniques for capturing the output of the supersampler:
+    - SUPERMASK, which records a large mask-bitmap
+        this is often faster for small, complex objects
+    - RLE, which records an RLE-encoded scanline
+        this is often faster for large objects with big spans
+
+    These blitters use two coordinate systems:
+    - destination coordinates, scale equal to the output - often
+        abbreviated with 'i' or 'I' in variable names
+    - supersampled coordinates, scale equal to the output * SCALE
+
+    Enabling SK_USE_LEGACY_AA_COVERAGE keeps the aa coverage calculations as
+    they were before the fix that unified the output of the RLE and MASK
+    supersamplers.
+ */
+
+//#define FORCE_SUPERMASK
+//#define FORCE_RLE
+//#define SK_USE_LEGACY_AA_COVERAGE
+
+///////////////////////////////////////////////////////////////////////////////
+
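For readers new to the SHIFT/SCALE/MASK convention above, the relationship between the two coordinate systems is easiest to see with concrete numbers. The standalone sketch below is not part of SkScan_AntiPath.cpp (the kShift/kScale/kMask names are illustrative); it only shows how a supersampled x splits into a destination pixel index and a sub-sample position when SHIFT == 2.

// Illustrative sketch: with SHIFT == 2, SCALE == 4 and MASK == 3, so each
// destination pixel spans 4 supersampled units in x and 4 supersampled
// scanlines in y (16 samples per pixel).
#include <cstdio>

static const int kShift = 2;              // mirrors SHIFT
static const int kScale = 1 << kShift;    // mirrors SCALE
static const int kMask  = kScale - 1;     // mirrors MASK

int main() {
    int superX = 29;                      // some supersampled x coordinate
    int destX  = superX >> kShift;        // destination pixel: 7
    int subX   = superX & kMask;          // position within that pixel: 1 of 4
    std::printf("super %d -> dest pixel %d, sub-sample %d/%d\n",
                superX, destX, subX, kScale);
    return 0;
}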
+/// Base class for a single-pass supersampled blitter.
+class BaseSuperBlitter : public SkBlitter {
+public:
+    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+                     const SkRegion& clip);
+
+    /// Must be explicitly defined on subclasses.
+    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
+                           const int16_t runs[]) SK_OVERRIDE {
+        SkDEBUGFAIL("How did I get here?");
+    }
+    /// May not be called on BaseSuperBlitter because it blits out of order.
+    virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE {
+        SkDEBUGFAIL("How did I get here?");
+    }
+
+protected:
+    SkBlitter* fRealBlitter;
+    /// Current y coordinate, in destination coordinates.
+    int fCurrIY;
+    /// Widest row of region to be blitted, in destination coordinates.
+    int fWidth;
+    /// Leftmost x coordinate in any row, in destination coordinates.
+    int fLeft;
+    /// Leftmost x coordinate in any row, in supersampled coordinates.
+    int fSuperLeft;
+
+    SkDEBUGCODE(int fCurrX;)
+    /// Current y coordinate in supersampled coordinates.
+    int fCurrY;
+    /// Initial y coordinate (top of bounds).
+    int fTop;
+};
+
+BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+                                   const SkRegion& clip) {
+    fRealBlitter = realBlitter;
+
+    /*
+     * We use the clip bounds instead of the ir, since we may be asked to
+     * draw outside of the rect if we're an inverse filltype
+     */
+    const int left = clip.getBounds().fLeft;
+    const int right = clip.getBounds().fRight;
+
+    fLeft = left;
+    fSuperLeft = left << SHIFT;
+    fWidth = right - left;
+#if 0
+    fCurrIY = -1;
+    fCurrY = -1;
+#else
+    fTop = ir.fTop;
+    fCurrIY = ir.fTop - 1;
+    fCurrY = (ir.fTop << SHIFT) - 1;
+#endif
+    SkDEBUGCODE(fCurrX = -1;)
+}
+
+/// Run-length-encoded supersampling antialiased blitter.
+class SuperBlitter : public BaseSuperBlitter {
+public:
+    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+                 const SkRegion& clip);
+
+    virtual ~SuperBlitter() {
+        this->flush();
+        sk_free(fRuns.fRuns);
+    }
+
+    /// Once fRuns contains a complete supersampled row, flush() blits
+    /// it out through the wrapped blitter.
+    void flush();
+
+    /// Blits a row of pixels, with location and width specified
+    /// in supersampled coordinates.
+    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
+    /// Blits a rectangle of pixels, with location and size specified
+    /// in supersampled coordinates.
+    virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE;
+
+private:
+    SkAlphaRuns fRuns;
+    int         fOffsetX;
+};
+
+SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+                           const SkRegion& clip)
+        : BaseSuperBlitter(realBlitter, ir, clip) {
+    const int width = fWidth;
+
+    // extra one to store the zero at the end
+    fRuns.fRuns = (int16_t*)sk_malloc_throw((width + 1 + (width + 2)/2) * sizeof(int16_t));
+    fRuns.fAlpha = (uint8_t*)(fRuns.fRuns + width + 1);
+    fRuns.reset(width);
+
+    fOffsetX = 0;
+}
+
+void SuperBlitter::flush() {
+    if (fCurrIY >= fTop) {
+        if (!fRuns.empty()) {
+            // SkDEBUGCODE(fRuns.dump();)
+            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
+            fRuns.reset(fWidth);
+            fOffsetX = 0;
+        }
+        fCurrIY = fTop - 1;
+        SkDEBUGCODE(fCurrX = -1;)
+    }
+}
+
+/** coverage_to_partial_alpha() is used by SkAlphaRuns, which *accumulates*
+    SCALE pixels worth of "alpha" in [0,(256/SCALE)] to produce a final value
+    in [0, 255] and handles clamping 256->255 itself, with the same
+    (alpha - (alpha >> 8)) correction as coverage_to_exact_alpha().
+*/
+static inline int coverage_to_partial_alpha(int aa) {
+    aa <<= 8 - 2*SHIFT;
+#ifdef SK_USE_LEGACY_AA_COVERAGE
+    aa -= aa >> (8 - SHIFT - 1);
+#endif
+    return aa;
+}
+
+/** coverage_to_exact_alpha() is used by our blitter, which wants a final
+    value in [0, 255].
+*/
+static inline int coverage_to_exact_alpha(int aa) {
+    int alpha = (256 >> SHIFT) * aa;
+    // clamp 256->255
+    return alpha - (alpha >> 8);
+}
+
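The two conversions differ in when the 256 -> 255 clamp happens: the partial form leaves room for SkAlphaRuns to accumulate SCALE supersampled rows before clamping, while the exact form produces a final alpha immediately. A standalone check with SHIFT == 2 (the function bodies are copied from above, minus the legacy-coverage branch; main() is illustrative only):

// Standalone check of the two coverage conversions for SHIFT == 2.
#include <cassert>

static const int SHIFT = 2;   // mirrors the SHIFT macro

static inline int coverage_to_partial_alpha(int aa) { return aa << (8 - 2*SHIFT); }
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    return alpha - (alpha >> 8);                  // clamp 256 -> 255
}

int main() {
    // One fully covered destination pixel on one supersampled scanline:
    assert(coverage_to_partial_alpha(4) == 64);   // four such rows sum to 256
    // The exact form folds the whole pixel's coverage at once and clamps:
    assert(coverage_to_exact_alpha(4) == 255);
    assert(coverage_to_exact_alpha(2) == 128);    // half covered
    return 0;
}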
+void SuperBlitter::blitH(int x, int y, int width) {
+    SkASSERT(width > 0);
+
+    int iy = y >> SHIFT;
+    SkASSERT(iy >= fCurrIY);
+
+    x -= fSuperLeft;
+    // hack, until I figure out why my cubics (I think) go beyond the bounds
+    if (x < 0) {
+        width += x;
+        x = 0;
+    }
+
+#ifdef SK_DEBUG
+    SkASSERT(y != fCurrY || x >= fCurrX);
+#endif
+    SkASSERT(y >= fCurrY);
+    if (fCurrY != y) {
+        fOffsetX = 0;
+        fCurrY = y;
+    }
+
+    if (iy != fCurrIY) {  // new scanline
+        this->flush();
+        fCurrIY = iy;
+    }
+
+    int start = x;
+    int stop = x + width;
+
+    SkASSERT(start >= 0 && stop > start);
+    // integer-pixel-aligned ends of blit, rounded out
+    int fb = start & MASK;
+    int fe = stop & MASK;
+    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+    if (n < 0) {
+        fb = fe - fb;
+        n = 0;
+        fe = 0;
+    } else {
+        if (fb == 0) {
+            n += 1;
+        } else {
+            fb = SCALE - fb;
+        }
+    }
+
+    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
+                         n, coverage_to_partial_alpha(fe),
+                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
+                         fOffsetX);
+
+#ifdef SK_DEBUG
+    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+    fCurrX = x + width;
+#endif
+}
+
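blitH() reduces each supersampled span to a partially covered left pixel, n fully covered pixels, and a partially covered right pixel, then hands those counts to fRuns.add(). The standalone trace below (illustrative only, SHIFT == 2) follows that arithmetic for the span [5, 14); the variable names mirror the function above.

// Standalone illustration of how blitH() splits one supersampled span.
#include <cstdio>

int main() {
    const int SHIFT = 2, SCALE = 1 << SHIFT, MASK = SCALE - 1;

    int start = 5, stop = 14;                      // supersampled span [5, 14)
    int fb = start & MASK;                         // samples left of the first pixel edge
    int fe = stop & MASK;                          // samples past the last full pixel
    int n  = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {                                   // span lies within a single pixel
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else if (fb == 0) {                          // left edge is pixel-aligned
        n += 1;
    } else {
        fb = SCALE - fb;                           // covered samples in the left pixel
    }

    // Prints: left pixel 1 covers 3/4, 1 full pixel(s), right pixel covers 2/4
    std::printf("left pixel %d covers %d/%d, %d full pixel(s), right pixel covers %d/%d\n",
                start >> SHIFT, fb, SCALE, n, fe, SCALE);
    return 0;
}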
+#if 0 // UNUSED
+static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+                               int n, U8CPU riteA) {
+    SkASSERT(leftA <= 0xFF);
+    SkASSERT(riteA <= 0xFF);
+
+    int16_t* run = runs.fRuns;
+    uint8_t* aa = runs.fAlpha;
+
+    if (ileft > 0) {
+        run[0] = ileft;
+        aa[0] = 0;
+        run += ileft;
+        aa += ileft;
+    }
+
+    SkASSERT(leftA < 0xFF);
+    if (leftA > 0) {
+        *run++ = 1;
+        *aa++ = leftA;
+    }
+
+    if (n > 0) {
+        run[0] = n;
+        aa[0] = 0xFF;
+        run += n;
+        aa += n;
+    }
+
+    SkASSERT(riteA < 0xFF);
+    if (riteA > 0) {
+        *run++ = 1;
+        *aa++ = riteA;
+    }
+    run[0] = 0;
+}
+#endif
+
+void SuperBlitter::blitRect(int x, int y, int width, int height) {
+    SkASSERT(width > 0);
+    SkASSERT(height > 0);
+
+    // blit leading rows
+    while ((y & MASK)) {
+        this->blitH(x, y++, width);
+        if (--height <= 0) {
+            return;
+        }
+    }
+    SkASSERT(height > 0);
+
+    // Since this is a rect, instead of blitting supersampled rows one at a
+    // time and then resolving to the destination canvas, we can blit
+    // directly to the destination canvas one row per SCALE supersampled rows.
+    int start_y = y >> SHIFT;
+    int stop_y = (y + height) >> SHIFT;
+    int count = stop_y - start_y;
+    if (count > 0) {
+        y += count << SHIFT;
+        height -= count << SHIFT;
+
+        // save original X for our tail blitH() loop at the bottom
+        int origX = x;
+
+        x -= fSuperLeft;
+        // hack, until I figure out why my cubics (I think) go beyond the bounds
+        if (x < 0) {
+            width += x;
+            x = 0;
+        }
+
+        // There is always a left column, a middle, and a right column.
+        // ileft is the destination x of the first pixel of the entire rect.
+        // xleft is (SCALE - # of covered supersampled pixels) in that
+        // destination pixel.
+        int ileft = x >> SHIFT;
+        int xleft = x & MASK;
+        // irite is the destination x of the last pixel of the OPAQUE section.
+        // xrite is the number of supersampled pixels extending beyond irite;
+        // xrite/SCALE should give us alpha.
+        int irite = (x + width) >> SHIFT;
+        int xrite = (x + width) & MASK;
+        if (!xrite) {
+            xrite = SCALE;
+            irite--;
+        }
+
+        // Need to call flush() to clean up pending draws before we
+        // even consider blitV(), since otherwise it can look nonmonotonic.
+        SkASSERT(start_y > fCurrIY);
+        this->flush();
+
+        int n = irite - ileft - 1;
+        if (n < 0) {
+            // If n < 0, we'll only have a single partially-transparent column
+            // of pixels to render.
+            xleft = xrite - xleft;
+            SkASSERT(xleft <= SCALE);
+            SkASSERT(xleft > 0);
+            xrite = 0;
+            fRealBlitter->blitV(ileft + fLeft, start_y, count,
+                                coverage_to_exact_alpha(xleft));
+        } else {
+            // With n = 0, we have two possibly-transparent columns of pixels
+            // to render; with n > 0, we have opaque columns between them.
+
+            xleft = SCALE - xleft;
+
+            // Using coverage_to_exact_alpha is not consistent with blitH()
+            const int coverageL = coverage_to_exact_alpha(xleft);
+            const int coverageR = coverage_to_exact_alpha(xrite);
+
+            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
+            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);
+
+            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
+                                       coverageL, coverageR);
+        }
+
+        // preamble for our next call to blitH()
+        fCurrIY = stop_y - 1;
+        fOffsetX = 0;
+        fCurrY = y - 1;
+        fRuns.reset(fWidth);
+        x = origX;
+    }
+
+    // catch any remaining few rows
+    SkASSERT(height <= MASK);
+    while (--height >= 0) {
+        this->blitH(x, y++, width);
+    }
+}
+
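The row bookkeeping in blitRect() is the part that is easy to misread: leading supersampled rows are fed through blitH() until y is SCALE-aligned, the aligned middle is emitted one destination row per SCALE supersampled rows (via blitAntiRect()/blitV()), and whatever remains falls through to the tail loop. A standalone trace of that arithmetic, illustrative only and without any blitting:

// Row bookkeeping in blitRect(), traced for a concrete case (SHIFT == 2).
#include <cstdio>

int main() {
    const int SHIFT = 2, MASK = (1 << SHIFT) - 1;

    int y = 6, height = 11;                 // supersampled rect rows [6, 17)

    int leading = 0;
    while ((y & MASK) && height > 0) {      // unaligned rows handled by blitH()
        ++y; --height; ++leading;
    }

    int start_y = y >> SHIFT;               // first full destination row
    int stop_y  = (y + height) >> SHIFT;    // one past the last full destination row
    int count   = stop_y - start_y;         // rows emitted via blitAntiRect()/blitV()
    y      += count << SHIFT;
    height -= count << SHIFT;

    // Prints: 2 leading rows, 2 full destination rows starting at 2, 1 trailing row(s)
    std::printf("%d leading rows, %d full destination rows starting at %d, %d trailing row(s)\n",
                leading, count, start_y, height);
    return 0;
}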
+///////////////////////////////////////////////////////////////////////////////
+
+/// Masked supersampling antialiased blitter.
+class MaskSuperBlitter : public BaseSuperBlitter {
+public:
+    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+                     const SkRegion& clip);
+    virtual ~MaskSuperBlitter() {
+        fRealBlitter->blitMask(fMask, fClipRect);
+    }
+
+    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
+
+    static bool CanHandleRect(const SkIRect& bounds) {
+#ifdef FORCE_RLE
+        return false;
+#endif
+        int width = bounds.width();
+        int64_t rb = SkAlign4(width);
+        // use 64bits to detect overflow
+        int64_t storage = rb * bounds.height();
+
+        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
+               (storage <= MaskSuperBlitter::kMAX_STORAGE);
+    }
+
+private:
+    enum {
+#ifdef FORCE_SUPERMASK
+        kMAX_WIDTH = 2048,
+        kMAX_STORAGE = 1024 * 1024 * 2
+#else
+        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
+        kMAX_STORAGE = 1024
+#endif
+    };
+
+    SkMask  fMask;
+    SkIRect fClipRect;
+    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
+    // perform a test to see if stopAlpha != 0
+    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
+};
+
+MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+                                   const SkRegion& clip)
+        : BaseSuperBlitter(realBlitter, ir, clip) {
+    SkASSERT(CanHandleRect(ir));
+
+    fMask.fImage = (uint8_t*)fStorage;
+    fMask.fBounds = ir;
+    fMask.fRowBytes = ir.width();
+    fMask.fFormat = SkMask::kA8_Format;
+
+    fClipRect = ir;
+    fClipRect.intersect(clip.getBounds());
+
+    // For valgrind, write 1 extra byte at the end so we don't read
+    // uninitialized memory. See comment in add_aa_span and fStorage[].
+    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
+}
+
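CanHandleRect() above is the sole gate between the mask path and the RLE path: the mask is an A8 buffer at destination resolution, so the shape must be at most kMAX_WIDTH pixels wide and its 4-byte-aligned rows must fit in kMAX_STORAGE bytes. A standalone restatement of that check using the non-FORCE_SUPERMASK limits (illustrative only, independent of the Skia types used above):

// Standalone restatement of the CanHandleRect() limits.
#include <cstdint>
#include <cstdio>

static bool can_use_mask(int width, int height) {
    const int     kMaxWidth   = 32;             // mirrors kMAX_WIDTH
    const int64_t kMaxStorage = 1024;           // mirrors kMAX_STORAGE, in bytes
    int64_t rowBytes = (width + 3) & ~3;        // SkAlign4(width)
    int64_t storage  = rowBytes * height;       // 64-bit to detect overflow
    return width <= kMaxWidth && storage <= kMaxStorage;
}

int main() {
    // Prints: 32x32: 1, 32x33: 0, 33x4: 0
    std::printf("32x32: %d, 32x33: %d, 33x4: %d\n",
                can_use_mask(32, 32),   // exactly kMAX_STORAGE bytes of mask
                can_use_mask(32, 33),   // 1056 bytes of mask is too much
                can_use_mask(33, 4));   // wider than kMAX_WIDTH
    return 0;
}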
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
+    /* I should be able to just add alpha[x] + startAlpha.
+       However, if the trailing edge of the previous span and the leading
+       edge of the current span round to the same super-sampled x value,
+       I might overflow to 256 with this add, hence the funny subtract.
+    */
+    unsigned tmp = *alpha + startAlpha;
+    SkASSERT(tmp <= 256);
+    *alpha = SkToU8(tmp - (tmp >> 8));
+}
+
+static inline uint32_t quadplicate_byte(U8CPU value) {
+    uint32_t pair = (value << 8) | value;
+    return (pair << 16) | pair;
+}
+
+// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
+// only ever call us with at most enough to hit 256 (never larger), so it is
+// enough to just subtract the high-bit. Actually clamping with a branch would
+// be slower (e.g. if (tmp > 255) tmp = 255;)
+//
+static inline void saturated_add(uint8_t* ptr, U8CPU add) {
+    unsigned tmp = *ptr + add;
+    SkASSERT(tmp <= 256);
+    *ptr = SkToU8(tmp - (tmp >> 8));
+}
+
+// minimum count before we want to setup an inner loop, adding 4-at-a-time
+#define MIN_COUNT_FOR_QUAD_LOOP 16
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
+                        U8CPU stopAlpha, U8CPU maxValue) {
+    SkASSERT(middleCount >= 0);
+
+    saturated_add(alpha, startAlpha);
+    alpha += 1;
+
+    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
+        // loop until we're quad-byte aligned
+        while (SkTCast<intptr_t>(alpha) & 0x3) {
+            alpha[0] = SkToU8(alpha[0] + maxValue);
+            alpha += 1;
+            middleCount -= 1;
+        }
+
+        int bigCount = middleCount >> 2;
+        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
+        uint32_t qval = quadplicate_byte(maxValue);
+        do {
+            *qptr++ += qval;
+        } while (--bigCount > 0);
+
+        middleCount &= 3;
+        alpha = reinterpret_cast<uint8_t*>(qptr);
+        // fall through to the following while-loop
+    }
+
+    while (--middleCount >= 0) {
+        alpha[0] = SkToU8(alpha[0] + maxValue);
+        alpha += 1;
+    }
+
+    // potentially this can be off the end of our "legal" alpha values, but that
+    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
+    // every time (slow), we just do it, and ensure that we've allocated extra space
+    // (see the + 1 comment in fStorage[])
+    saturated_add(alpha, stopAlpha);
+}
+
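The quad loop above works because quadplicate_byte() replicates one coverage byte into every lane of a uint32_t, so one 32-bit add bumps four adjacent mask bytes at once, and because a destination pixel accumulates at most 64 + 64 + 64 + 63 = 255 over its four supersampled rows (the last row's maxValue is one smaller), so a lane can never carry into its neighbour. A standalone sketch, illustrative only; it uses memcpy instead of the cast above to stay portable:

// Standalone illustration of the 4-at-a-time middle loop in add_aa_span().
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t quadplicate_byte(uint8_t value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}

int main() {
    uint8_t row[4] = {64, 64, 64, 64};   // coverage after one supersampled scanline

    uint32_t q;
    std::memcpy(&q, row, 4);             // 4-byte view of four adjacent mask bytes
    q += quadplicate_byte(64);           // second scanline, all four pixels at once
    q += quadplicate_byte(64);           // third scanline
    q += quadplicate_byte(63);           // fourth scanline contributes one less
    std::memcpy(row, &q, 4);

    // Prints: 255 255 255 255 -- no lane ever exceeds 255, so no carries occur
    std::printf("%d %d %d %d\n", row[0], row[1], row[2], row[3]);
    return 0;
}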
+void MaskSuperBlitter::blitH(int x, int y, int width) {
+    int iy = (y >> SHIFT);
+
+    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
+    iy -= fMask.fBounds.fTop;   // make it relative to 0
+
+    // This should never happen, but it does. Until the true cause is
+    // discovered, let's skip this span instead of crashing.
+    // See http://crbug.com/17569.
+    if (iy < 0) {
+        return;
+    }
+
+#ifdef SK_DEBUG
+    {
+        int ix = x >> SHIFT;
+        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
+    }
+#endif
+
+    x -= (fMask.fBounds.fLeft << SHIFT);
+
+    // hack, until I figure out why my cubics (I think) go beyond the bounds
+    if (x < 0) {
+        width += x;
+        x = 0;
+    }
+
+    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);
+
+    int start = x;
+    int stop = x + width;
+
+    SkASSERT(start >= 0 && stop > start);
+    int fb = start & MASK;
+    int fe = stop & MASK;
+    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+
+    if (n < 0) {
+        SkASSERT(row >= fMask.fImage);
+        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
+        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
+    } else {
+        fb = SCALE - fb;
+        SkASSERT(row >= fMask.fImage);
+        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
+        add_aa_span(row, coverage_to_partial_alpha(fb),
+                    n, coverage_to_partial_alpha(fe),
+                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
+    }
+
+#ifdef SK_DEBUG
+    fCurrX = x + width;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
+    const SkScalar min = -max;
+    return  r.fLeft > min && r.fTop > min &&
+            r.fRight < max && r.fBottom < max;
+}
+
+static int overflows_short_shift(int value, int shift) {
+    const int s = 16 + shift;
+    return (value << s >> s) - value;
+}
+
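overflows_short_shift() relies on sign-extension: shifting left by 16 + shift and arithmetically back discards exactly the bits that would not survive in an int16_t once the value itself is shifted left by shift, so the round trip differs from the input precisely when the coordinate is too large to supersample. A standalone check mirroring the SkASSERTs in the function below (illustrative only):

// Standalone check of the overflows_short_shift() trick.
#include <cassert>

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (value << s >> s) - value;
}

int main() {
    // With SHIFT == 2, destination coordinates up to 8191 survive supersampling:
    assert(overflows_short_shift(8191, 2) == 0);   // 8191 << 2 == 32764, fits a short
    assert(overflows_short_shift(8192, 2) != 0);   // 8192 << 2 == 32768, does not
    assert(overflows_short_shift(32767, 0) == 0);
    assert(overflows_short_shift(32768, 0) != 0);
    return 0;
}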
+/**
+    Would any of the coordinates of this rectangle not fit in a short,
+    when left-shifted by shift?
+*/
+static int rect_overflows_short_shift(SkIRect rect, int shift) {
+    SkASSERT(!overflows_short_shift(8191, SHIFT));
+    SkASSERT(overflows_short_shift(8192, SHIFT));
+    SkASSERT(!overflows_short_shift(32767, 0));
+    SkASSERT(overflows_short_shift(32768, 0));
+
+    // Since we expect these to succeed, we bit-or together
+    // for a tiny extra bit of speed.
+    return overflows_short_shift(rect.fLeft, SHIFT) |
+           overflows_short_shift(rect.fRight, SHIFT) |
+           overflows_short_shift(rect.fTop, SHIFT) |
+           overflows_short_shift(rect.fBottom, SHIFT);
+}
+
+static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
+    const SkScalar maxScalar = SkIntToScalar(maxInt);
+
+    if (fitsInsideLimit(src, maxScalar)) {
+        src.roundOut(dst);
+        return true;
+    }
+    return false;
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
+                          SkBlitter* blitter, bool forceRLE) {
+    if (origClip.isEmpty()) {
+        return;
+    }
+
+    SkIRect ir;
+
+    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
+#if 0
+        const SkRect& r = path.getBounds();
+        SkDebugf("--- bounds can't fit in SkIRect: %g %g %g %g\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
+#endif
+        return;
+    }
+    if (ir.isEmpty()) {
+        if (path.isInverseFillType()) {
+            blitter->blitRegion(origClip);
+        }
+        return;
+    }
+
+    // If the intersection of the path bounds and the clip bounds
+    // will overflow 32767 when << by SHIFT, we can't supersample,
+    // so draw without antialiasing.
+    SkIRect clippedIR;
+    if (path.isInverseFillType()) {
+        // If the path is an inverse fill, it's going to fill the entire
+        // clip, and we care whether the entire clip exceeds our limits.
+        clippedIR = origClip.getBounds();
+    } else {
+        if (!clippedIR.intersect(ir, origClip.getBounds())) {
+            return;
+        }
+    }
+    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
+        SkScan::FillPath(path, origClip, blitter);
+        return;
+    }
+
+    // Our antialiasing can't handle a clip larger than 32767, so we restrict
+    // the clip to that limit here. (the runs[] uses int16_t for its index).
+    //
+    // A more general solution (one that could also eliminate the need to
+    // disable aa based on ir bounds (see overflows_short_shift)) would be
+    // to tile the clip/target...
+    SkRegion tmpClipStorage;
+    const SkRegion* clipRgn = &origClip;
+    {
+        static const int32_t kMaxClipCoord = 32767;
+        const SkIRect& bounds = origClip.getBounds();
+        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
+            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
+            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
+            clipRgn = &tmpClipStorage;
+        }
+    }
+    // for here down, use clipRgn, not origClip
+
+    SkScanClipper clipper(blitter, clipRgn, ir);
+    const SkIRect* clipRect = clipper.getClipRect();
+
+    if (clipper.getBlitter() == NULL) { // clipped out
+        if (path.isInverseFillType()) {
+            blitter->blitRegion(*clipRgn);
+        }
+        return;
+    }
+
+    // now use the (possibly wrapped) blitter
+    blitter = clipper.getBlitter();
+
+    if (path.isInverseFillType()) {
+        sk_blit_above(blitter, ir, *clipRgn);
+    }
+
+    SkIRect superRect, *superClipRect = NULL;
+
+    if (clipRect) {
+        superRect.set(clipRect->fLeft << SHIFT, clipRect->fTop << SHIFT,
+                      clipRect->fRight << SHIFT, clipRect->fBottom << SHIFT);
+        superClipRect = &superRect;
+    }
+
+    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
+
+    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
+    // if we're an inverse filltype
+    if (!path.isInverseFillType() && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
+        MaskSuperBlitter superBlit(blitter, ir, *clipRgn);
+        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
+        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
+    } else {
+        SuperBlitter superBlit(blitter, ir, *clipRgn);
+        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
+    }
+
+    if (path.isInverseFillType()) {
+        sk_blit_below(blitter, ir, *clipRgn);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkRasterClip.h"
+
+void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
+                      SkBlitter* blitter) {
+    if (clip.isEmpty()) {
+        return;
+    }
+
+    if (clip.isBW()) {
+        FillPath(path, clip.bwRgn(), blitter);
+    } else {
+        SkRegion        tmp;
+        SkAAClipBlitter aaBlitter;
+
+        tmp.setRect(clip.getBounds());
+        aaBlitter.init(blitter, &clip.aaRgn());
+        SkScan::FillPath(path, tmp, &aaBlitter);
+    }
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
+                          SkBlitter* blitter) {
+    if (clip.isEmpty()) {
+        return;
+    }
+
+    if (clip.isBW()) {
+        AntiFillPath(path, clip.bwRgn(), blitter);
+    } else {
+        SkRegion        tmp;
+        SkAAClipBlitter aaBlitter;
+
+        tmp.setRect(clip.getBounds());
+        aaBlitter.init(blitter, &clip.aaRgn());
+        SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
+    }
+}
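To close the loop on the @file comment at the top, the following self-contained toy (an editorial illustration, not Skia code) shows the idea both blitters implement: take SCALE x SCALE samples per destination pixel, count the hits, and fold the count into a [0, 255] alpha with the same 256 -> 255 clamp used above. The circle used as a test shape is arbitrary.

// Toy 4x4 supersampler: prints an 8x8 grid of antialiased coverage values.
#include <cstdio>

static const int SHIFT = 2, SCALE = 1 << SHIFT;

static bool inside(double x, double y) {              // test shape: a circle
    double dx = x - 4.0, dy = y - 4.0;
    return dx * dx + dy * dy <= 9.0;                  // radius 3, centred at (4, 4)
}

int main() {
    for (int iy = 0; iy < 8; ++iy) {                  // 8x8 destination pixels
        for (int ix = 0; ix < 8; ++ix) {
            int hits = 0;                             // 0..16 supersamples inside
            for (int sy = 0; sy < SCALE; ++sy) {
                for (int sx = 0; sx < SCALE; ++sx) {
                    double x = ix + (sx + 0.5) / SCALE;
                    double y = iy + (sy + 0.5) / SCALE;
                    hits += inside(x, y);
                }
            }
            int alpha = hits * (256 / (SCALE * SCALE));
            alpha -= alpha >> 8;                      // clamp 256 -> 255
            std::printf("%4d", alpha);
        }
        std::printf("\n");
    }
    return 0;
}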