gfx/skia/patches/archive/fix-gradient-clamp.patch

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
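
The diff below brackets the 32-bit gradient cache with two extra premultiplied
entries so that clamp tiling can return the exact endpoint colours from
cache[-1] and cache[kCache32Count * 2] instead of dithering an interior entry.
The following standalone C++ sketch (hypothetical names, not the Skia types
touched by the patch) only illustrates that sentinel layout and lookup; it is
not part of the patch itself.

    // Sketch of the cache layout this patch introduces: kCacheCount*2 dithered
    // ramp entries bracketed by one extra slot on each side, with clamp tiling
    // reading the sentinels at cache[-1] and cache[kCacheCount * 2] directly.
    #include <cstdint>
    #include <vector>

    struct ClampedGradientCache {
        enum { kCacheCount = 256 };         // interior entries per dither row
        std::vector<uint32_t> storage;      // kCacheCount * 2 + 2 slots
        uint32_t* cache;                    // points at storage[1]

        ClampedGradientCache(uint32_t firstColor, uint32_t lastColor)
            : storage(kCacheCount * 2 + 2), cache(storage.data() + 1) {
            // ... build the kCacheCount * 2 dithered ramp entries here ...
            cache[-1] = firstColor;               // clamp colour for t < 0
            cache[kCacheCount * 2] = lastColor;   // clamp colour for t > 0xFFFF
        }

        // t is a 16.16 fixed-point position, as with SkFixed in the patch;
        // out-of-range values hit the sentinels instead of a dithered entry.
        uint32_t lookupClamped(int32_t t) const {
            if (t < 0)      return cache[-1];
            if (t > 0xFFFF) return cache[kCacheCount * 2];
            return cache[t >> 8];           // 0xFFFF >> 8 == 255, still in range
        }
    };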

diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
--- a/gfx/skia/src/effects/SkGradientShader.cpp
+++ b/gfx/skia/src/effects/SkGradientShader.cpp
@@ -167,16 +167,17 @@ private:
 
     mutable uint16_t* fCache16;   // working ptr. If this is NULL, we need to recompute the cache values
     mutable SkPMColor* fCache32;  // working ptr. If this is NULL, we need to recompute the cache values
 
     mutable uint16_t* fCache16Storage;    // storage for fCache16, allocated on demand
     mutable SkMallocPixelRef* fCache32PixelRef;
     mutable unsigned fCacheAlpha;         // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
 
+    static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
     static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
     static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
                                 U8CPU alpha);
     void setCacheAlpha(U8CPU alpha) const;
     void initCommon();
 
     typedef SkShader INHERITED;
 };
@@ -512,16 +513,31 @@ static inline U8CPU dither_fixed_to_8(Sk
  * For dithering with premultiply, we want to ceiling the alpha component,
  * to ensure that it is always >= any color component.
  */
 static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
     n >>= 8;
     return ((n << 1) - (n | (n >> 8))) >> 8;
 }
 
+SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
+{
+    SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
+    SkFixed r = SkColorGetR(c0);
+    SkFixed g = SkColorGetG(c0);
+    SkFixed b = SkColorGetB(c0);
+
+    a = SkIntToFixed(a) + 0x8000;
+    r = SkIntToFixed(r) + 0x8000;
+    g = SkIntToFixed(g) + 0x8000;
+    b = SkIntToFixed(b) + 0x8000;
+
+    return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
+}
+
 void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
                                       int count, U8CPU paintAlpha) {
     SkASSERT(count > 1);
 
     // need to apply paintAlpha to our two endpoints
     SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
     SkFixed da;
     {
@@ -613,24 +629,24 @@ const uint16_t* Gradient_Shader::getCach
         }
     }
     return fCache16;
 }
 
 const SkPMColor* Gradient_Shader::getCache32() const {
     if (fCache32 == NULL) {
         // double the count for dither entries
-        const int entryCount = kCache32Count * 2;
+        const int entryCount = kCache32Count * 2 + 2;
         const size_t allocSize = sizeof(SkPMColor) * entryCount;
 
         if (NULL == fCache32PixelRef) {
             fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
                                           (NULL, allocSize, NULL));
         }
-        fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
+        fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
         if (fColorCount == 2) {
             Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
                             kCache32Count, fCacheAlpha);
         } else {
             Rec* rec = fRecs;
             int prevIndex = 0;
             for (int i = 1; i < fColorCount; i++) {
                 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> (16 - kCache32Bits);
@@ -644,28 +660,31 @@ const SkPMColor* Gradient_Shader::getCac
             }
             SkASSERT(prevIndex == kCache32Count - 1);
         }
 
         if (fMapper) {
             SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
                                                  (NULL, allocSize, NULL));
             SkPMColor* linear = fCache32;         // just computed linear data
-            SkPMColor* mapped = (SkPMColor*)newPR->getAddr();    // storage for mapped data
+            SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1;    // storage for mapped data
             SkUnitMapper* map = fMapper;
             for (int i = 0; i < kCache32Count; i++) {
                 int index = map->mapUnit16((i << 8) | i) >> 8;
                 mapped[i] = linear[index];
                 mapped[i + kCache32Count] = linear[index + kCache32Count];
             }
             fCache32PixelRef->unref();
             fCache32PixelRef = newPR;
-            fCache32 = (SkPMColor*)newPR->getAddr();
+            fCache32 = (SkPMColor*)newPR->getAddr() + 1;
         }
     }
+    //Write the clamp colours into the first and last entries of fCache32
+    fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
+    fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
     return fCache32;
 }
 
 /*
  *  Because our caller might rebuild the same (logically the same) gradient
  *  over and over, we'd like to return exactly the same "bitmap" if possible,
  *  allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
  *  To do that, we maintain a private cache of built-bitmaps, based on our
@@ -875,28 +894,38 @@ void Linear_Gradient::shadeSpan(int x, i
             dx = dxStorage[0];
         } else {
             SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
             dx = SkScalarToFixed(fDstToIndex.getScaleX());
         }
 
         if (SkFixedNearlyZero(dx)) {
             // we're a vertical gradient, so no change in a span
-            unsigned fi = proc(fx) >> (16 - kCache32Bits);
-            sk_memset32_dither(dstC, cache[toggle + fi],
-                               cache[(toggle ^ TOGGLE_MASK) + fi], count);
+            if (proc == clamp_tileproc) {
+                if (fx < 0) {
+                    sk_memset32(dstC, cache[-1], count);
+                } else if (fx > 0xFFFF) {
+                    sk_memset32(dstC, cache[kCache32Count * 2], count);
+                } else {
+                    unsigned fi = proc(fx) >> (16 - kCache32Bits);
+                    sk_memset32_dither(dstC, cache[toggle + fi],
+                                       cache[(toggle ^ TOGGLE_MASK) + fi], count);
+                }
+            } else {
+                unsigned fi = proc(fx) >> (16 - kCache32Bits);
+                sk_memset32_dither(dstC, cache[toggle + fi],
+                                   cache[(toggle ^ TOGGLE_MASK) + fi], count);
+            }
         } else if (proc == clamp_tileproc) {
             SkClampRange range;
-            range.init(fx, dx, count, 0, 0xFF);
+            range.init(fx, dx, count, cache[-1], cache[kCache32Count * 2]);
 
             if ((count = range.fCount0) > 0) {
-                sk_memset32_dither(dstC,
-                            cache[toggle + range.fV0],
-                            cache[(toggle ^ TOGGLE_MASK) + range.fV0],
-                            count);
+                // Do we really want to dither the clamp values?
+                sk_memset32(dstC, range.fV0, count);
                 dstC += count;
             }
             if ((count = range.fCount1) > 0) {
                 int unroll = count >> 3;
                 fx = range.fFx1;
                 for (int i = 0; i < unroll; i++) {
                     NO_CHECK_ITER; NO_CHECK_ITER;
                     NO_CHECK_ITER; NO_CHECK_ITER;
@@ -905,20 +934,17 @@ void Linear_Gradient::shadeSpan(int x, i
                 }
                 if ((count &= 7) > 0) {
                     do {
                         NO_CHECK_ITER;
                     } while (--count != 0);
                 }
             }
             if ((count = range.fCount2) > 0) {
-                sk_memset32_dither(dstC,
-                            cache[toggle + range.fV1],
-                            cache[(toggle ^ TOGGLE_MASK) + range.fV1],
-                            count);
+                sk_memset32(dstC, range.fV1, count);
             }
         } else if (proc == mirror_tileproc) {
             do {
                 unsigned fi = mirror_8bits(fx >> 8);
                 SkASSERT(fi <= 0xFF);
                 fx += dx;
                 *dstC++ = cache[toggle + fi];
                 toggle ^= TOGGLE_MASK;
@@ -1670,19 +1699,24 @@ public:
         }
         SkScalar b = (SkScalarMul(fDiff.fX, fx) +
                       SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
         SkScalar db = (SkScalarMul(fDiff.fX, dx) +
                       SkScalarMul(fDiff.fY, dy)) * 2;
         if (proc == clamp_tileproc) {
             for (; count > 0; --count) {
                 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
-                SkFixed index = SkClampMax(t, 0xFFFF);
-                SkASSERT(index <= 0xFFFF);
-                *dstC++ = cache[index >> (16 - kCache32Bits)];
+                if (t < 0) {
+                    *dstC++ = cache[-1];
+                } else if (t > 0xFFFF) {
+                    *dstC++ = cache[kCache32Count * 2];
+                } else {
+                    SkASSERT(t <= 0xFFFF);
+                    *dstC++ = cache[t >> (16 - kCache32Bits)];
+                }
                 fx += dx;
                 fy += dy;
                 b += db;
             }
         } else if (proc == mirror_tileproc) {
             for (; count > 0; --count) {
                 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
                 SkFixed index = mirror_tileproc(t);
