Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie users to test the
condition and pass a nullptr when disabling double key logic.
michael@0 | 1 | |
michael@0 | 2 | /* |
michael@0 | 3 | * Copyright 2006 The Android Open Source Project |
michael@0 | 4 | * |
michael@0 | 5 | * Use of this source code is governed by a BSD-style license that can be |
michael@0 | 6 | * found in the LICENSE file. |
michael@0 | 7 | */ |
michael@0 | 8 | |
michael@0 | 9 | |
michael@0 | 10 | #include "SkScanPriv.h" |
michael@0 | 11 | #include "SkPath.h" |
michael@0 | 12 | #include "SkMatrix.h" |
michael@0 | 13 | #include "SkBlitter.h" |
michael@0 | 14 | #include "SkRegion.h" |
michael@0 | 15 | #include "SkAntiRun.h" |
michael@0 | 16 | |
michael@0 | 17 | #define SHIFT 2 |
michael@0 | 18 | #define SCALE (1 << SHIFT) |
michael@0 | 19 | #define MASK (SCALE - 1) |
michael@0 | 20 | |
michael@0 | 21 | /** @file |
michael@0 | 22 | We have two techniques for capturing the output of the supersampler: |
michael@0 | 23 | - SUPERMASK, which records a large mask-bitmap |
michael@0 | 24 | this is often faster for small, complex objects |
michael@0 | 25 | - RLE, which records a rle-encoded scanline |
michael@0 | 26 | this is often faster for large objects with big spans |
michael@0 | 27 | |
michael@0 | 28 | These blitters use two coordinate systems: |
michael@0 | 29 | - destination coordinates, scale equal to the output - often |
michael@0 | 30 | abbreviated with 'i' or 'I' in variable names |
michael@0 | 31 | - supersampled coordinates, scale equal to the output * SCALE |
michael@0 | 32 | |
michael@0 | 33 | Enabling SK_USE_LEGACY_AA_COVERAGE keeps the aa coverage calculations as |
michael@0 | 34 | they were before the fix that unified the output of the RLE and MASK |
michael@0 | 35 | supersamplers. |
michael@0 | 36 | */ |
michael@0 | 37 | |
michael@0 | 38 | //#define FORCE_SUPERMASK |
michael@0 | 39 | //#define FORCE_RLE |
michael@0 | 40 | //#define SK_USE_LEGACY_AA_COVERAGE |
michael@0 | 41 | |
michael@0 | 42 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 43 | |
michael@0 | 44 | /// Base class for a single-pass supersampled blitter. |
michael@0 | 45 | class BaseSuperBlitter : public SkBlitter { |
michael@0 | 46 | public: |
michael@0 | 47 | BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, |
michael@0 | 48 | const SkRegion& clip); |
michael@0 | 49 | |
michael@0 | 50 | /// Must be explicitly defined on subclasses. |
michael@0 | 51 | virtual void blitAntiH(int x, int y, const SkAlpha antialias[], |
michael@0 | 52 | const int16_t runs[]) SK_OVERRIDE { |
michael@0 | 53 | SkDEBUGFAIL("How did I get here?"); |
michael@0 | 54 | } |
michael@0 | 55 | /// May not be called on BaseSuperBlitter because it blits out of order. |
michael@0 | 56 | virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE { |
michael@0 | 57 | SkDEBUGFAIL("How did I get here?"); |
michael@0 | 58 | } |
michael@0 | 59 | |
michael@0 | 60 | protected: |
michael@0 | 61 | SkBlitter* fRealBlitter; |
michael@0 | 62 | /// Current y coordinate, in destination coordinates. |
michael@0 | 63 | int fCurrIY; |
michael@0 | 64 | /// Widest row of region to be blitted, in destination coordinates. |
michael@0 | 65 | int fWidth; |
michael@0 | 66 | /// Leftmost x coordinate in any row, in destination coordinates. |
michael@0 | 67 | int fLeft; |
michael@0 | 68 | /// Leftmost x coordinate in any row, in supersampled coordinates. |
michael@0 | 69 | int fSuperLeft; |
michael@0 | 70 | |
michael@0 | 71 | SkDEBUGCODE(int fCurrX;) |
michael@0 | 72 | /// Current y coordinate in supersampled coordinates. |
michael@0 | 73 | int fCurrY; |
michael@0 | 74 | /// Initial y coordinate (top of bounds). |
michael@0 | 75 | int fTop; |
michael@0 | 76 | }; |
michael@0 | 77 | |
michael@0 | 78 | BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, |
michael@0 | 79 | const SkRegion& clip) { |
michael@0 | 80 | fRealBlitter = realBlitter; |
michael@0 | 81 | |
michael@0 | 82 | /* |
michael@0 | 83 | * We use the clip bounds instead of the ir, since we may be asked to |
michael@0 | 84 | * draw outside of the rect if we're a inverse filltype |
michael@0 | 85 | */ |
michael@0 | 86 | const int left = clip.getBounds().fLeft; |
michael@0 | 87 | const int right = clip.getBounds().fRight; |
michael@0 | 88 | |
michael@0 | 89 | fLeft = left; |
michael@0 | 90 | fSuperLeft = left << SHIFT; |
michael@0 | 91 | fWidth = right - left; |
michael@0 | 92 | #if 0 |
michael@0 | 93 | fCurrIY = -1; |
michael@0 | 94 | fCurrY = -1; |
michael@0 | 95 | #else |
michael@0 | 96 | fTop = ir.fTop; |
michael@0 | 97 | fCurrIY = ir.fTop - 1; |
michael@0 | 98 | fCurrY = (ir.fTop << SHIFT) - 1; |
michael@0 | 99 | #endif |
michael@0 | 100 | SkDEBUGCODE(fCurrX = -1;) |
michael@0 | 101 | } |
michael@0 | 102 | |
/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                 const SkRegion& clip);

    virtual ~SuperBlitter() {
        this->flush();
        sk_free(fRuns.fRuns);
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE;

private:
    /// Accumulated runs/alpha for the current destination scanline.
    SkAlphaRuns fRuns;
    /// Offset handed to (and returned by) fRuns.add(); reset to 0 at the
    /// start of each new supersampled row.
    int fOffsetX;
};
michael@0 | 129 | |
michael@0 | 130 | SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, |
michael@0 | 131 | const SkRegion& clip) |
michael@0 | 132 | : BaseSuperBlitter(realBlitter, ir, clip) { |
michael@0 | 133 | const int width = fWidth; |
michael@0 | 134 | |
michael@0 | 135 | // extra one to store the zero at the end |
michael@0 | 136 | fRuns.fRuns = (int16_t*)sk_malloc_throw((width + 1 + (width + 2)/2) * sizeof(int16_t)); |
michael@0 | 137 | fRuns.fAlpha = (uint8_t*)(fRuns.fRuns + width + 1); |
michael@0 | 138 | fRuns.reset(width); |
michael@0 | 139 | |
michael@0 | 140 | fOffsetX = 0; |
michael@0 | 141 | } |
michael@0 | 142 | |
michael@0 | 143 | void SuperBlitter::flush() { |
michael@0 | 144 | if (fCurrIY >= fTop) { |
michael@0 | 145 | if (!fRuns.empty()) { |
michael@0 | 146 | // SkDEBUGCODE(fRuns.dump();) |
michael@0 | 147 | fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns); |
michael@0 | 148 | fRuns.reset(fWidth); |
michael@0 | 149 | fOffsetX = 0; |
michael@0 | 150 | } |
michael@0 | 151 | fCurrIY = fTop - 1; |
michael@0 | 152 | SkDEBUGCODE(fCurrX = -1;) |
michael@0 | 153 | } |
michael@0 | 154 | } |
michael@0 | 155 | |
michael@0 | 156 | /** coverage_to_partial_alpha() is being used by SkAlphaRuns, which |
michael@0 | 157 | *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)] |
michael@0 | 158 | to produce a final value in [0, 255] and handles clamping 256->255 |
michael@0 | 159 | itself, with the same (alpha - (alpha >> 8)) correction as |
michael@0 | 160 | coverage_to_exact_alpha(). |
michael@0 | 161 | */ |
michael@0 | 162 | static inline int coverage_to_partial_alpha(int aa) { |
michael@0 | 163 | aa <<= 8 - 2*SHIFT; |
michael@0 | 164 | #ifdef SK_USE_LEGACY_AA_COVERAGE |
michael@0 | 165 | aa -= aa >> (8 - SHIFT - 1); |
michael@0 | 166 | #endif |
michael@0 | 167 | return aa; |
michael@0 | 168 | } |
michael@0 | 169 | |
michael@0 | 170 | /** coverage_to_exact_alpha() is being used by our blitter, which wants |
michael@0 | 171 | a final value in [0, 255]. |
michael@0 | 172 | */ |
michael@0 | 173 | static inline int coverage_to_exact_alpha(int aa) { |
michael@0 | 174 | int alpha = (256 >> SHIFT) * aa; |
michael@0 | 175 | // clamp 256->255 |
michael@0 | 176 | return alpha - (alpha >> 8); |
michael@0 | 177 | } |
michael@0 | 178 | |
/// Accumulates one supersampled span into fRuns, flushing the previous
/// destination scanline when y crosses into a new destination row.
void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    // destination row for this supersampled y
    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        // new supersampled row: restart the add() offset
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new destination scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;   // partial coverage at the left edge
    int fe = stop & MASK;    // partial coverage at the right edge
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;  // fully-covered middle pixels

    if (n < 0) {
        // span begins and ends inside a single destination pixel
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            // left edge is pixel-aligned, so that pixel is fully covered
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}
michael@0 | 237 | |
#if 0 // UNUSED
// NOTE: dead code, deliberately kept behind #if 0. Writes a complete
// left-edge / opaque-middle / right-edge run sequence directly into `runs`.
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    // terminating zero
    run[0] = 0;
}
#endif
michael@0 | 275 | |
/// Blits a supersampled rectangle. Leading/trailing partial rows go through
/// blitH(); the aligned middle is resolved one destination row per SCALE
/// supersampled rows via blitV()/blitAntiRect().
void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows until y is destination-row aligned
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;   // full destination rows covered
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            xrite = 0;
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH(): pretend we just finished
        // the last fully-resolved supersampled row
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}
michael@0 | 371 | |
michael@0 | 372 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 373 | |
/// Masked supersampling antialiased blitter. Accumulates coverage into a
/// fixed-size A8 mask (fStorage) and resolves it in one blitMask() call at
/// destruction.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);
    virtual ~MaskSuperBlitter() {
        // hand the accumulated mask to the wrapped blitter
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    virtual void blitH(int x, int y, int width) SK_OVERRIDE;

    /// Returns true if bounds are small enough for the mask technique:
    /// narrow enough that RLE wouldn't be faster, and with row-aligned
    /// storage that fits in fStorage.
    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask fMask;
    SkIRect fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};
michael@0 | 415 | |
michael@0 | 416 | MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, |
michael@0 | 417 | const SkRegion& clip) |
michael@0 | 418 | : BaseSuperBlitter(realBlitter, ir, clip) { |
michael@0 | 419 | SkASSERT(CanHandleRect(ir)); |
michael@0 | 420 | |
michael@0 | 421 | fMask.fImage = (uint8_t*)fStorage; |
michael@0 | 422 | fMask.fBounds = ir; |
michael@0 | 423 | fMask.fRowBytes = ir.width(); |
michael@0 | 424 | fMask.fFormat = SkMask::kA8_Format; |
michael@0 | 425 | |
michael@0 | 426 | fClipRect = ir; |
michael@0 | 427 | fClipRect.intersect(clip.getBounds()); |
michael@0 | 428 | |
michael@0 | 429 | // For valgrind, write 1 extra byte at the end so we don't read |
michael@0 | 430 | // uninitialized memory. See comment in add_aa_span and fStorage[]. |
michael@0 | 431 | memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1); |
michael@0 | 432 | } |
michael@0 | 433 | |
michael@0 | 434 | static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) { |
michael@0 | 435 | /* I should be able to just add alpha[x] + startAlpha. |
michael@0 | 436 | However, if the trailing edge of the previous span and the leading |
michael@0 | 437 | edge of the current span round to the same super-sampled x value, |
michael@0 | 438 | I might overflow to 256 with this add, hence the funny subtract. |
michael@0 | 439 | */ |
michael@0 | 440 | unsigned tmp = *alpha + startAlpha; |
michael@0 | 441 | SkASSERT(tmp <= 256); |
michael@0 | 442 | *alpha = SkToU8(tmp - (tmp >> 8)); |
michael@0 | 443 | } |
michael@0 | 444 | |
michael@0 | 445 | static inline uint32_t quadplicate_byte(U8CPU value) { |
michael@0 | 446 | uint32_t pair = (value << 8) | value; |
michael@0 | 447 | return (pair << 16) | pair; |
michael@0 | 448 | } |
michael@0 | 449 | |
michael@0 | 450 | // Perform this tricky subtract, to avoid overflowing to 256. Our caller should |
michael@0 | 451 | // only ever call us with at most enough to hit 256 (never larger), so it is |
michael@0 | 452 | // enough to just subtract the high-bit. Actually clamping with a branch would |
michael@0 | 453 | // be slower (e.g. if (tmp > 255) tmp = 255;) |
michael@0 | 454 | // |
michael@0 | 455 | static inline void saturated_add(uint8_t* ptr, U8CPU add) { |
michael@0 | 456 | unsigned tmp = *ptr + add; |
michael@0 | 457 | SkASSERT(tmp <= 256); |
michael@0 | 458 | *ptr = SkToU8(tmp - (tmp >> 8)); |
michael@0 | 459 | } |
michael@0 | 460 | |
michael@0 | 461 | // minimum count before we want to setup an inner loop, adding 4-at-a-time |
michael@0 | 462 | #define MIN_COUNT_FOR_QUAD_LOOP 16 |
michael@0 | 463 | |
/// Adds coverage along a row of alpha bytes: startAlpha on the first pixel,
/// maxValue on each of middleCount interior pixels (4 bytes at a time once
/// aligned, when the run is long enough), and stopAlpha one past the middle.
static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    // partial coverage for the leftmost destination pixel
    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            // add maxValue to four alpha bytes at once
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop for the remainder
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}
michael@0 | 502 | |
/// Accumulates one supersampled span directly into the A8 mask.
void MaskSuperBlitter::blitH(int x, int y, int width) {
    // destination row for this supersampled y
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    // make x relative to the mask, in supersampled coordinates
    x -= (fMask.fBounds.fLeft << SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;   // partial coverage at the left edge
    int fe = stop & MASK;    // partial coverage at the right edge
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;  // full middle pixels


    if (n < 0) {
        // span begins and ends inside a single destination pixel
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}
michael@0 | 559 | |
michael@0 | 560 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 561 | |
michael@0 | 562 | static bool fitsInsideLimit(const SkRect& r, SkScalar max) { |
michael@0 | 563 | const SkScalar min = -max; |
michael@0 | 564 | return r.fLeft > min && r.fTop > min && |
michael@0 | 565 | r.fRight < max && r.fBottom < max; |
michael@0 | 566 | } |
michael@0 | 567 | |
/// Returns 0 if `value` survives a round trip through the low (16 - shift)
/// signed bits (i.e. it fits in an int16_t after being left-shifted by
/// `shift`), nonzero otherwise.
static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    // Shift through uint32_t: a signed left shift that overflows is
    // undefined behavior, and overflowing is exactly the case this
    // predicate exists to detect. The arithmetic right shift then
    // sign-extends the surviving low (32 - s) bits before comparing
    // against the original value.
    return (static_cast<int32_t>(static_cast<uint32_t>(value) << s) >> s) - value;
}
michael@0 | 572 | |
michael@0 | 573 | /** |
michael@0 | 574 | Would any of the coordinates of this rectangle not fit in a short, |
michael@0 | 575 | when left-shifted by shift? |
michael@0 | 576 | */ |
michael@0 | 577 | static int rect_overflows_short_shift(SkIRect rect, int shift) { |
michael@0 | 578 | SkASSERT(!overflows_short_shift(8191, SHIFT)); |
michael@0 | 579 | SkASSERT(overflows_short_shift(8192, SHIFT)); |
michael@0 | 580 | SkASSERT(!overflows_short_shift(32767, 0)); |
michael@0 | 581 | SkASSERT(overflows_short_shift(32768, 0)); |
michael@0 | 582 | |
michael@0 | 583 | // Since we expect these to succeed, we bit-or together |
michael@0 | 584 | // for a tiny extra bit of speed. |
michael@0 | 585 | return overflows_short_shift(rect.fLeft, SHIFT) | |
michael@0 | 586 | overflows_short_shift(rect.fRight, SHIFT) | |
michael@0 | 587 | overflows_short_shift(rect.fTop, SHIFT) | |
michael@0 | 588 | overflows_short_shift(rect.fBottom, SHIFT); |
michael@0 | 589 | } |
michael@0 | 590 | |
michael@0 | 591 | static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) { |
michael@0 | 592 | const SkScalar maxScalar = SkIntToScalar(maxInt); |
michael@0 | 593 | |
michael@0 | 594 | if (fitsInsideLimit(src, maxScalar)) { |
michael@0 | 595 | src.roundOut(dst); |
michael@0 | 596 | return true; |
michael@0 | 597 | } |
michael@0 | 598 | return false; |
michael@0 | 599 | } |
michael@0 | 600 | |
/// Antialiased scan-convert of `path` against `origClip` into `blitter`,
/// choosing between the mask and RLE supersamplers. `forceRLE` forces the
/// RLE path even when the mask blitter could handle the bounds.
void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    SkIRect ir;

    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        // NOTE(review): dead debug code; the format string also lacks
        // specifiers for the four arguments — fix if ever re-enabled.
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
#endif
        return;
    }
    if (ir.isEmpty()) {
        // empty path bounds: an inverse fill covers the whole clip
        if (path.isInverseFillType()) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (path.isInverseFillType()) {
       // If the path is an inverse fill, it's going to fill the entire
       // clip, and we care whether the entire clip exceeds our limits.
       clippedIR = origClip.getBounds();
    } else {
       if (!clippedIR.intersect(ir, origClip.getBounds())) {
           return;
       }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);
    const SkIRect* clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == NULL) { // clipped out
        if (path.isInverseFillType()) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (path.isInverseFillType()) {
        // fill the clip region above the path bounds
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = NULL;

    if (clipRect) {
        // promote the clip rect to supersampled coordinates
        superRect.set(clipRect->fLeft << SHIFT, clipRect->fTop << SHIFT,
                      clipRect->fRight << SHIFT, clipRect->fBottom << SHIFT);
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!path.isInverseFillType() && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, *clipRgn);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    } else {
        SuperBlitter superBlit(blitter, ir, *clipRgn);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    }

    if (path.isInverseFillType()) {
        // fill the clip region below the path bounds
        sk_blit_below(blitter, ir, *clipRgn);
    }
}
michael@0 | 702 | |
michael@0 | 703 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 704 | |
michael@0 | 705 | #include "SkRasterClip.h" |
michael@0 | 706 | |
michael@0 | 707 | void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, |
michael@0 | 708 | SkBlitter* blitter) { |
michael@0 | 709 | if (clip.isEmpty()) { |
michael@0 | 710 | return; |
michael@0 | 711 | } |
michael@0 | 712 | |
michael@0 | 713 | if (clip.isBW()) { |
michael@0 | 714 | FillPath(path, clip.bwRgn(), blitter); |
michael@0 | 715 | } else { |
michael@0 | 716 | SkRegion tmp; |
michael@0 | 717 | SkAAClipBlitter aaBlitter; |
michael@0 | 718 | |
michael@0 | 719 | tmp.setRect(clip.getBounds()); |
michael@0 | 720 | aaBlitter.init(blitter, &clip.aaRgn()); |
michael@0 | 721 | SkScan::FillPath(path, tmp, &aaBlitter); |
michael@0 | 722 | } |
michael@0 | 723 | } |
michael@0 | 724 | |
michael@0 | 725 | void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip, |
michael@0 | 726 | SkBlitter* blitter) { |
michael@0 | 727 | if (clip.isEmpty()) { |
michael@0 | 728 | return; |
michael@0 | 729 | } |
michael@0 | 730 | |
michael@0 | 731 | if (clip.isBW()) { |
michael@0 | 732 | AntiFillPath(path, clip.bwRgn(), blitter); |
michael@0 | 733 | } else { |
michael@0 | 734 | SkRegion tmp; |
michael@0 | 735 | SkAAClipBlitter aaBlitter; |
michael@0 | 736 | |
michael@0 | 737 | tmp.setRect(clip.getBounds()); |
michael@0 | 738 | aaBlitter.init(blitter, &clip.aaRgn()); |
michael@0 | 739 | SkScan::AntiFillPath(path, tmp, &aaBlitter, true); |
michael@0 | 740 | } |
michael@0 | 741 | } |