diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
--- a/gfx/skia/src/effects/SkGradientShader.cpp
+++ b/gfx/skia/src/effects/SkGradientShader.cpp
@@ -167,16 +167,17 @@ private:
 
     mutable uint16_t*   fCache16;   // working ptr. If this is NULL, we need to recompute the cache values
     mutable SkPMColor*  fCache32;   // working ptr. If this is NULL, we need to recompute the cache values
 
     mutable uint16_t*   fCache16Storage;    // storage for fCache16, allocated on demand
     mutable SkMallocPixelRef* fCache32PixelRef;
     mutable unsigned    fCacheAlpha;        // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
 
+    static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
     static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
     static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
                                 U8CPU alpha);
     void setCacheAlpha(U8CPU alpha) const;
     void initCommon();
 
     typedef SkShader INHERITED;
 };
@@ -512,16 +513,31 @@ static inline U8CPU dither_fixed_to_8(Sk
  * For dithering with premultiply, we want to ceiling the alpha component,
  * to ensure that it is always >= any color component.
  */
 static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
     n >>= 8;
     return ((n << 1) - (n | (n >> 8))) >> 8;
 }
 
+SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
+{
+    SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
+    SkFixed r = SkColorGetR(c0);
+    SkFixed g = SkColorGetG(c0);
+    SkFixed b = SkColorGetB(c0);
+
+    a = SkIntToFixed(a) + 0x8000;
+    r = SkIntToFixed(r) + 0x8000;
+    g = SkIntToFixed(g) + 0x8000;
+    b = SkIntToFixed(b) + 0x8000;
+
+    return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
+}
+
 void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
                                       int count, U8CPU paintAlpha) {
     SkASSERT(count > 1);
 
     // need to apply paintAlpha to our two endpoints
     SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
     SkFixed da;
     {
@@ -613,24 +629,24 @@ const uint16_t* Gradient_Shader::getCach
         }
     }
     return fCache16;
 }
 
 const SkPMColor* Gradient_Shader::getCache32() const {
     if (fCache32 == NULL) {
         // double the count for dither entries
-        const int entryCount = kCache32Count * 2;
+        const int entryCount = kCache32Count * 2 + 2;
         const size_t allocSize = sizeof(SkPMColor) * entryCount;
 
         if (NULL == fCache32PixelRef) {
             fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
                                           (NULL, allocSize, NULL));
         }
-        fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
+        fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
         if (fColorCount == 2) {
             Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
                             kCache32Count, fCacheAlpha);
         } else {
             Rec* rec = fRecs;
             int prevIndex = 0;
             for (int i = 1; i < fColorCount; i++) {
                 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> (16 - kCache32Bits);
@@ -644,28 +660,31 @@ const SkPMColor* Gradient_Shader::getCac
             }
             SkASSERT(prevIndex == kCache32Count - 1);
         }
 
         if (fMapper) {
             SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
                                                  (NULL, allocSize, NULL));
             SkPMColor* linear = fCache32;           // just computed linear data
-            SkPMColor* mapped = (SkPMColor*)newPR->getAddr();    // storage for mapped data
+            SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1;    // storage for mapped data
             SkUnitMapper* map = fMapper;
             for (int i = 0; i < kCache32Count; i++) {
                 int index = map->mapUnit16((i << 8) | i) >> 8;
                 mapped[i] = linear[index];
                 mapped[i + kCache32Count] = linear[index + kCache32Count];
             }
             fCache32PixelRef->unref();
             fCache32PixelRef = newPR;
-            fCache32 = (SkPMColor*)newPR->getAddr();
+            fCache32 = (SkPMColor*)newPR->getAddr() + 1;
         }
     }
+    //Write the clamp colours into the first and last entries of fCache32
+    fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
+    fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
     return fCache32;
 }
 
 /*
  *  Because our caller might rebuild the same (logically the same) gradient
  *  over and over, we'd like to return exactly the same "bitmap" if possible,
  *  allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
  *  To do that, we maintain a private cache of built-bitmaps, based on our
@@ -875,28 +894,38 @@ void Linear_Gradient::shadeSpan(int x, i
             dx = dxStorage[0];
         } else {
             SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
             dx = SkScalarToFixed(fDstToIndex.getScaleX());
         }
 
         if (SkFixedNearlyZero(dx)) {
             // we're a vertical gradient, so no change in a span
-            unsigned fi = proc(fx) >> (16 - kCache32Bits);
-            sk_memset32_dither(dstC, cache[toggle + fi],
-                               cache[(toggle ^ TOGGLE_MASK) + fi], count);
+            if (proc == clamp_tileproc) {
+                if (fx < 0) {
+                    sk_memset32(dstC, cache[-1], count);
+                } else if (fx > 0xFFFF) {
+                    sk_memset32(dstC, cache[kCache32Count * 2], count);
+                } else {
+                    unsigned fi = proc(fx) >> (16 - kCache32Bits);
+                    sk_memset32_dither(dstC, cache[toggle + fi],
+                                       cache[(toggle ^ TOGGLE_MASK) + fi], count);
+                }
+            } else {
+                unsigned fi = proc(fx) >> (16 - kCache32Bits);
+                sk_memset32_dither(dstC, cache[toggle + fi],
+                                   cache[(toggle ^ TOGGLE_MASK) + fi], count);
+            }
         } else if (proc == clamp_tileproc) {
             SkClampRange range;
-            range.init(fx, dx, count, 0, 0xFF);
+            range.init(fx, dx, count, cache[-1], cache[kCache32Count * 2]);
 
             if ((count = range.fCount0) > 0) {
-                sk_memset32_dither(dstC,
-                                   cache[toggle + range.fV0],
-                                   cache[(toggle ^ TOGGLE_MASK) + range.fV0],
-                                   count);
+                // Do we really want to dither the clamp values?
+                sk_memset32(dstC, range.fV0, count);
                 dstC += count;
             }
             if ((count = range.fCount1) > 0) {
                 int unroll = count >> 3;
                 fx = range.fFx1;
                 for (int i = 0; i < unroll; i++) {
                     NO_CHECK_ITER;  NO_CHECK_ITER;
                     NO_CHECK_ITER;  NO_CHECK_ITER;
@@ -905,20 +934,17 @@ void Linear_Gradient::shadeSpan(int x, i
                 }
                 if ((count &= 7) > 0) {
                     do {
                         NO_CHECK_ITER;
                     } while (--count != 0);
                 }
             }
             if ((count = range.fCount2) > 0) {
-                sk_memset32_dither(dstC,
-                                   cache[toggle + range.fV1],
-                                   cache[(toggle ^ TOGGLE_MASK) + range.fV1],
-                                   count);
+                sk_memset32(dstC, range.fV1, count);
             }
         } else if (proc == mirror_tileproc) {
             do {
                 unsigned fi = mirror_8bits(fx >> 8);
                 SkASSERT(fi <= 0xFF);
                 fx += dx;
                 *dstC++ = cache[toggle + fi];
                 toggle ^= TOGGLE_MASK;
@@ -1670,19 +1699,24 @@ public:
         }
         SkScalar b = (SkScalarMul(fDiff.fX, fx) +
                       SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
         SkScalar db = (SkScalarMul(fDiff.fX, dx) +
                        SkScalarMul(fDiff.fY, dy)) * 2;
         if (proc == clamp_tileproc) {
             for (; count > 0; --count) {
                 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
-                SkFixed index = SkClampMax(t, 0xFFFF);
-                SkASSERT(index <= 0xFFFF);
-                *dstC++ = cache[index >> (16 - kCache32Bits)];
+                if (t < 0) {
+                    *dstC++ = cache[-1];
+                } else if (t > 0xFFFF) {
+                    *dstC++ = cache[kCache32Count * 2];
+                } else {
+                    SkASSERT(t <= 0xFFFF);
+                    *dstC++ = cache[t >> (16 - kCache32Bits)];
+                }
                 fx += dx;
                 fy += dy;
                 b += db;
             }
         } else if (proc == mirror_tileproc) {
             for (; count > 0; --count) {
                 SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
                 SkFixed index = mirror_tileproc(t);
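
Sketch (not part of the patch): the change widens the 32-bit cache by two entries and keeps fCache32 pointing one slot past the start of the allocation, so cache[-1] and cache[kCache32Count * 2] hold the exact premultiplied end colours and clamp tiling can return them undithered. The self-contained C++ below only illustrates that indexing scheme; all names in it (PMColor, kCacheCount, premultiply) are hypothetical stand-ins, not Skia code.

// Standalone illustration of the clamp-sentinel cache layout described above.
// Hypothetical names; only the indexing scheme mirrors the patch.
#include <cstdint>
#include <cstdio>
#include <vector>

typedef uint32_t PMColor;   // premultiplied ARGB, standing in for SkPMColor

// Round-to-nearest premultiply, in the spirit of SkPremultiplyARGBInline.
static PMColor premultiply(unsigned a, unsigned r, unsigned g, unsigned b) {
    r = (r * a + 127) / 255;
    g = (g * a + 127) / 255;
    b = (b * a + 127) / 255;
    return (a << 24) | (r << 16) | (g << 8) | b;
}

int main() {
    const int kCacheCount = 256;                    // plays the role of kCache32Count
    // 2 * kCacheCount dithered entries plus the two clamp sentinels
    // (the "+ 2" added to entryCount in getCache32()).
    std::vector<PMColor> storage(kCacheCount * 2 + 2, 0);
    PMColor* cache = storage.data() + 1;            // cache[-1] is now a valid slot

    // getCache32() writes the exact end colours into the sentinel slots;
    // the dithered body entries are left at zero here since only the
    // sentinels matter for this example.
    cache[-1]              = premultiply(0xFF, 0xFF, 0x00, 0x00); // first colour
    cache[kCacheCount * 2] = premultiply(0xFF, 0x00, 0x00, 0xFF); // last colour

    // Clamp lookup in the style of the two-point-radial loop: t is a 16.16
    // fixed-point position; out-of-range values hit the sentinels, so the
    // clamped edges are never dithered.
    int32_t samples[3] = { -0x4000, 0x8000, 0x12345 };
    for (int i = 0; i < 3; i++) {
        int32_t t = samples[i];
        PMColor c;
        if (t < 0) {
            c = cache[-1];
        } else if (t > 0xFFFF) {
            c = cache[kCacheCount * 2];
        } else {
            c = cache[t >> 8];                      // 16.16 position -> 8-bit index
        }
        std::printf("t=%d -> %08X\n", (int)t, (unsigned)c);
    }
    return 0;
}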