Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when the double key logic is disabled.
michael@0 | 1 | From f941ea32e44a2436d235e83ef1a434289a9d9c1e Mon Sep 17 00:00:00 2001 |
michael@0 | 2 | From: George Wright <gwright@mozilla.com> |
michael@0 | 3 | Date: Wed, 23 May 2012 11:40:25 -0400 |
michael@0 | 4 | Subject: [PATCH 08/10] Bug 755869 - [11] Re-apply bug 687188 - Skia |
michael@0 | 5 | radial gradients should use the 0/1 color stop values |
michael@0 | 6 | for clamping. r=mattwoodrow |
michael@0 | 7 | |
michael@0 | 8 | --- |
michael@0 | 9 | gfx/skia/src/effects/SkGradientShader.cpp | 76 +++++++++++++++++++++++------ |
michael@0 | 10 | 1 files changed, 61 insertions(+), 15 deletions(-) |
michael@0 | 11 | |
michael@0 | 12 | diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp |
michael@0 | 13 | index 59ba48c..ea05a39 100644 |
michael@0 | 14 | --- a/gfx/skia/src/effects/SkGradientShader.cpp |
michael@0 | 15 | +++ b/gfx/skia/src/effects/SkGradientShader.cpp |
michael@0 | 16 | @@ -204,6 +204,7 @@ private: |
michael@0 | 17 | mutable SkMallocPixelRef* fCache32PixelRef; |
michael@0 | 18 | mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value |
michael@0 | 19 | |
michael@0 | 20 | + static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha); |
michael@0 | 21 | static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count); |
michael@0 | 22 | static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count, |
michael@0 | 23 | U8CPU alpha); |
michael@0 | 24 | @@ -507,6 +508,21 @@ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) { |
michael@0 | 25 | return ((n << 1) - (n | (n >> 8))) >> 8; |
michael@0 | 26 | } |
michael@0 | 27 | |
michael@0 | 28 | +SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha) |
michael@0 | 29 | +{ |
michael@0 | 30 | + SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha); |
michael@0 | 31 | + SkFixed r = SkColorGetR(c0); |
michael@0 | 32 | + SkFixed g = SkColorGetG(c0); |
michael@0 | 33 | + SkFixed b = SkColorGetB(c0); |
michael@0 | 34 | + |
michael@0 | 35 | + a = SkIntToFixed(a) + 0x8000; |
michael@0 | 36 | + r = SkIntToFixed(r) + 0x8000; |
michael@0 | 37 | + g = SkIntToFixed(g) + 0x8000; |
michael@0 | 38 | + b = SkIntToFixed(b) + 0x8000; |
michael@0 | 39 | + |
michael@0 | 40 | + return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16); |
michael@0 | 41 | +} |
michael@0 | 42 | + |
michael@0 | 43 | void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1, |
michael@0 | 44 | int count, U8CPU paintAlpha) { |
michael@0 | 45 | SkASSERT(count > 1); |
michael@0 | 46 | @@ -628,14 +644,14 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) { |
michael@0 | 47 | const SkPMColor* Gradient_Shader::getCache32() const { |
michael@0 | 48 | if (fCache32 == NULL) { |
michael@0 | 49 | // double the count for dither entries |
michael@0 | 50 | - const int entryCount = kCache32Count * 2; |
michael@0 | 51 | + const int entryCount = kCache32Count * 2 + 2; |
michael@0 | 52 | const size_t allocSize = sizeof(SkPMColor) * entryCount; |
michael@0 | 53 | |
michael@0 | 54 | if (NULL == fCache32PixelRef) { |
michael@0 | 55 | fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef, |
michael@0 | 56 | (NULL, allocSize, NULL)); |
michael@0 | 57 | } |
michael@0 | 58 | - fCache32 = (SkPMColor*)fCache32PixelRef->getAddr(); |
michael@0 | 59 | + fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1; |
michael@0 | 60 | if (fColorCount == 2) { |
michael@0 | 61 | Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1], |
michael@0 | 62 | kGradient32Length, fCacheAlpha); |
michael@0 | 63 | @@ -659,7 +675,7 @@ const SkPMColor* Gradient_Shader::getCache32() const { |
michael@0 | 64 | SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef, |
michael@0 | 65 | (NULL, allocSize, NULL)); |
michael@0 | 66 | SkPMColor* linear = fCache32; // just computed linear data |
michael@0 | 67 | - SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data |
michael@0 | 68 | + SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data |
michael@0 | 69 | SkUnitMapper* map = fMapper; |
michael@0 | 70 | for (int i = 0; i < kGradient32Length; i++) { |
michael@0 | 71 | int index = map->mapUnit16((i << 8) | i) >> 8; |
michael@0 | 72 | @@ -668,10 +684,13 @@ const SkPMColor* Gradient_Shader::getCache32() const { |
michael@0 | 73 | } |
michael@0 | 74 | fCache32PixelRef->unref(); |
michael@0 | 75 | fCache32PixelRef = newPR; |
michael@0 | 76 | - fCache32 = (SkPMColor*)newPR->getAddr(); |
michael@0 | 77 | + fCache32 = (SkPMColor*)newPR->getAddr() + 1; |
michael@0 | 78 | } |
michael@0 | 79 | complete_32bit_cache(fCache32, kCache32Count); |
michael@0 | 80 | } |
michael@0 | 81 | + //Write the clamp colours into the first and last entries of fCache32 |
michael@0 | 82 | + fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha); |
michael@0 | 83 | + fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha); |
michael@0 | 84 | return fCache32; |
michael@0 | 85 | } |
michael@0 | 86 | |
michael@0 | 87 | @@ -857,6 +876,18 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx, |
michael@0 | 88 | SkPMColor* SK_RESTRICT dstC, |
michael@0 | 89 | const SkPMColor* SK_RESTRICT cache, |
michael@0 | 90 | int toggle, int count) { |
michael@0 | 91 | + if (proc == clamp_tileproc) { |
michael@0 | 92 | + // Read out clamp values from beginning/end of the cache. No need to lerp |
michael@0 | 93 | + // or dither |
michael@0 | 94 | + if (fx < 0) { |
michael@0 | 95 | + sk_memset32(dstC, cache[-1], count); |
michael@0 | 96 | + return; |
michael@0 | 97 | + } else if (fx > 0xFFFF) { |
michael@0 | 98 | + sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count); |
michael@0 | 99 | + return; |
michael@0 | 100 | + } |
michael@0 | 101 | + } |
michael@0 | 102 | + |
michael@0 | 103 | // We're a vertical gradient, so no change in a span. |
michael@0 | 104 | // If colors change sharply across the gradient, dithering is |
michael@0 | 105 | // insufficient (it subsamples the color space) and we need to lerp. |
michael@0 | 106 | @@ -875,6 +906,18 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx, |
michael@0 | 107 | SkPMColor* SK_RESTRICT dstC, |
michael@0 | 108 | const SkPMColor* SK_RESTRICT cache, |
michael@0 | 109 | int toggle, int count) { |
michael@0 | 110 | + if (proc == clamp_tileproc) { |
michael@0 | 111 | + // Read out clamp values from beginning/end of the cache. No need to lerp |
michael@0 | 112 | + // or dither |
michael@0 | 113 | + if (fx < 0) { |
michael@0 | 114 | + sk_memset32(dstC, cache[-1], count); |
michael@0 | 115 | + return; |
michael@0 | 116 | + } else if (fx > 0xFFFF) { |
michael@0 | 117 | + sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count); |
michael@0 | 118 | + return; |
michael@0 | 119 | + } |
michael@0 | 120 | + } |
michael@0 | 121 | + |
michael@0 | 122 | // We're a vertical gradient, so no change in a span. |
michael@0 | 123 | // If colors change sharply across the gradient, dithering is |
michael@0 | 124 | // insufficient (it subsamples the color space) and we need to lerp. |
michael@0 | 125 | @@ -900,10 +943,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx, |
michael@0 | 126 | range.init(fx, dx, count, 0, Gradient_Shader::kGradient32Length); |
michael@0 | 127 | |
michael@0 | 128 | if ((count = range.fCount0) > 0) { |
michael@0 | 129 | - sk_memset32_dither(dstC, |
michael@0 | 130 | - cache[toggle + range.fV0], |
michael@0 | 131 | - cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV0], |
michael@0 | 132 | - count); |
michael@0 | 133 | + // Shouldn't be any need to dither for clamping? |
michael@0 | 134 | + sk_memset32(dstC, cache[-1], count); |
michael@0 | 135 | dstC += count; |
michael@0 | 136 | } |
michael@0 | 137 | if ((count = range.fCount1) > 0) { |
michael@0 | 138 | @@ -922,10 +963,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx, |
michael@0 | 139 | } |
michael@0 | 140 | } |
michael@0 | 141 | if ((count = range.fCount2) > 0) { |
michael@0 | 142 | - sk_memset32_dither(dstC, |
michael@0 | 143 | - cache[toggle + range.fV1], |
michael@0 | 144 | - cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV1], |
michael@0 | 145 | - count); |
michael@0 | 146 | + // Shouldn't be any need to dither for clamping? |
michael@0 | 147 | + sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count); |
michael@0 | 148 | } |
michael@0 | 149 | } |
michael@0 | 150 | |
michael@0 | 151 | @@ -1796,9 +1835,16 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx, |
michael@0 | 152 | for (; count > 0; --count) { |
michael@0 | 153 | SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, |
michael@0 | 154 | fOneOverTwoA, posRoot); |
michael@0 | 155 | - SkFixed index = SkClampMax(t, 0xFFFF); |
michael@0 | 156 | - SkASSERT(index <= 0xFFFF); |
michael@0 | 157 | - *dstC++ = cache[index >> Gradient_Shader::kCache32Shift]; |
michael@0 | 158 | + |
michael@0 | 159 | + if (t < 0) { |
michael@0 | 160 | + *dstC++ = cache[-1]; |
michael@0 | 161 | + } else if (t > 0xFFFF) { |
michael@0 | 162 | + *dstC++ = cache[Gradient_Shader::kCache32Count * 2]; |
michael@0 | 163 | + } else { |
michael@0 | 164 | + SkASSERT(t <= 0xFFFF); |
michael@0 | 165 | + *dstC++ = cache[t >> Gradient_Shader::kCache32Shift]; |
michael@0 | 166 | + } |
michael@0 | 167 | + |
michael@0 | 168 | fx += dx; |
michael@0 | 169 | fy += dy; |
michael@0 | 170 | b += db; |
michael@0 | 171 | -- |
michael@0 | 172 | 1.7.5.4 |
michael@0 | 173 |