--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/gfx/skia/trunk/src/gpu/GrContext.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,1815 @@

/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "effects/GrSingleTextureEffect.h"
#include "effects/GrConfigConversionEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTrace.h"

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContext* context = SkNEW(GrContext);
    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}
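
// A minimal usage sketch (assumed; no caller appears in this file): creating a
// GrContext for the GL backend. GrGLCreateNativeInterface() comes from
// gl/GrGLInterface.h in this Skia revision; the surrounding setup is assumed.
#if 0  // illustrative only
    SkAutoTUnref<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
    GrContext* context = GrContext::Create(kOpenGL_GrBackend,
                                           (GrBackendContext) glInterface.get());
    if (NULL != context) {
        // ... draw using the context ...
        context->unref();  // Create() returns an owning ref.
    }
#endif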

GrContext::GrContext() {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fTextureCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_RESOURCE_CACHE_COUNT,
                                MAX_RESOURCE_CACHE_BYTES));
    fTextureCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}

GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    fTextureCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}

void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}
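
// A sketch of the intended call pattern (assumed; no caller appears in this
// file): when the 3D API context has been destroyed out from under us, the
// client reports the loss so that teardown abandons backend objects instead of
// deleting them through the dead API.
#if 0  // illustrative only
    context->contextLost();   // abandons GPU resources, rebuilds the draw buffer
#endif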

void GrContext::contextDestroyed() {
    // abandon first so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fGpu->markContextDirty();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

size_t GrContext::getGpuTextureCacheBytes() const {
    return fTextureCache->getCachedResourceBytes();
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    GrResource* resource = fTextureCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    return fTextureCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fTextureCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrResource* resource = fTextureCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

// Nearest-neighbor resample of src into dst using 16.16 fixed-point stepping.
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         size_t bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    GrFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}
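
// A worked example of the 16.16 stepping above (a sketch, not part of the
// original file): stretching a 3-texel row to 8 texels. dx = (3 << 16) / 8 is
// 0.375 in fixed point; sampling starts at dx/2 so each dst texel reads the
// src texel under its center.
#if 0  // illustrative only
    static void print_stretch_indices() {
        const int srcW = 3, dstW = 8;
        int32_t dx = (srcW << 16) / dstW;   // 0x6000 == 0.375
        int32_t x = dx >> 1;                // start half a step in
        for (int i = 0; i < dstW; ++i) {
            SkDebugf("dst[%d] <- src[%d]\n", i, x >> 16);  // 0,0,0,1,1,2,2,2
            x += dx;
        }
    }
#endif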

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
                                           const GrCacheID& cacheID,
                                           void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrTextureFlagBit |
                    kNoStencil_GrTextureFlagBit;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (NULL != texture) {
        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode
                                      : GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));

        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);

        if (arg.succeeded()) {
            GrPoint* verts = (GrPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures when the texture params is either filtered or
        // not. Either implement filtered stretch blit on CPU or just create
        // one when FBO case fails.

        rtDesc.fFlags = kNone_GrTextureFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);
        size_t bpp = GrBytesPerPixel(desc.fConfig);
        SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                     srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        // Assign to the outer 'texture' so the CPU-stretched texture is
        // actually returned (a debug-only local here would be discarded in
        // release builds and leak the texture).
        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(),
                                      stretchedRowBytes);
        SkASSERT(NULL != texture);
    }

    return texture;
}

GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    SK_TRACE_EVENT0("GrContext::createTexture");

    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexture::NeedsResizing(resourceKey)) {
        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexture::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fTextureCache->purgeAsNeeded(1, texture->sizeInBytes());
        fTextureCache->addResource(resourceKey, texture);

        if (NULL != cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}
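
// A sketch of how a caller typically keys and creates a cached texture
// (assumed usage, pieced together from the functions above); the same desc and
// cacheID later recover the texture via findAndRefTexture().
#if 0  // illustrative only
    GrTextureDesc desc;
    desc.fWidth  = 256;
    desc.fHeight = 256;
    desc.fConfig = kRGBA_8888_GrPixelConfig;

    static const GrCacheID::Domain gDomain = GrCacheID::GenerateDomain();
    GrCacheID::Key keyData;
    keyData.fData64[0] = 1;      // e.g., an image's unique ID
    keyData.fData64[1] = 0;
    GrCacheID cacheID(gDomain, keyData);

    GrTexture* texture = context->createTexture(NULL, desc, cacheID,
                                                pixels, rowBytes, NULL);
#endif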

static GrTexture* create_scratch_texture(GrGpu* gpu,
                                         GrResourceCache* textureCache,
                                         const GrTextureDesc& desc) {
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    if (NULL != texture) {
        GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        textureCache->purgeAsNeeded(1, texture->sizeInBytes());
        // Make the resource exclusive so future 'find' calls don't return it
        textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    }
    return texture;
}

GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures() &&
        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        // If we're never recycling this texture we can always make it the right size
        return create_scratch_texture(fGpu, fTextureCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTexture::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        desc.fFlags  = inDesc.fFlags;
        desc.fWidth  = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fTextureCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}
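
// A worked example of the approximate-match binning above (a sketch, not part
// of the original file): pow2 rounding with a floor of 16 lets many nearby
// request sizes share one cache entry.
#if 0  // illustrative only
    static int bin_dimension(int requested) {
        static const int MIN_SIZE = 16;
        return GrMax(MIN_SIZE, GrNextPow2(requested));
    }
    // bin_dimension(10)  == 16   (clamped to the minimum)
    // bin_dimension(70)  == 128  (next power of two)
    // bin_dimension(128) == 128  (already a power of two)
#endif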

void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else if (texture->getDeferredRefCount() <= 0) {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        fTextureCache->deleteResource(texture->getCacheEntry());
    } else {
        // In this case fDeferredRefCount > 0 but the cache is the only
        // one holding a real ref. Mark the object so when the deferred
        // ref count goes to 0 the texture will be deleted (remember
        // in this code path scratch textures aren't getting reused).
        texture->setNeedsDeferredUnref();
    }
}
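
// A sketch of the scratch-texture round trip these functions implement
// (assumed usage; GrAutoScratchTexture is the RAII helper this file itself
// uses in readTexturePixels below):
#if 0  // illustrative only
    GrTextureDesc desc;
    desc.fFlags  = kRenderTarget_GrTextureFlagBit;
    desc.fWidth  = 300;   // may be rounded up to 512 by the approx match
    desc.fHeight = 200;   // may be rounded up to 256
    desc.fConfig = kSkia8888_GrPixelConfig;

    GrAutoScratchTexture ast(context, desc, GrContext::kApprox_ScratchTexMatch);
    if (NULL != ast.texture()) {
        // ... render into ast.texture() ...
    }   // ~GrAutoScratchTexture returns the texture to the cache
#endif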

void GrContext::unlockScratchTexture(GrTexture* texture) {
    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            fTextureCache->deleteResource(texture->getCacheEntry());
        } else {
            // In this case (fRefCnt > 1 || defRefCnt > 0) we don't really
            // want to re-add it to the cache (since it will never be reused).
            // Instead, give up the cache's ref and leave the decision up to
            // addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}

void GrContext::purgeCache() {
    if (NULL != fTextureCache) {
        fTextureCache->purgeAsNeeded();
    }
}

bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(NULL != data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}


GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return GrMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->eightBitPaletteSupport()) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}


////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* target) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
                                                                canIgnoreRect, target);
}
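
// A minimal usage sketch (assumed, not from this file): clearing the current
// render target to opaque white. GrColorPackRGBA packs 8-bit channels into a
// GrColor; passing NULL for the rect clears the whole target.
#if 0  // illustrative only
    context->clear(NULL, GrColorPackRGBA(0xFF, 0xFF, 0xFF, 0xFF),
                   true /* canIgnoreRect */, NULL /* current render target */);
#endif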

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

/* create a triangle strip that strokes the specified rect. There are 8
   unique vertices, but we repeat the last 2 to close up. Alternatively we
   could use an indices array, and then only send 8 verts, but not sure that
   would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static bool isIRect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    if (0 == strokeWidth && target->willUseHWAALines()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);

    if (strokeWidth < 0) {
        return !isIRect(*devBoundRect);
    } else {
        return true;
    }
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}
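
// A sketch of what the preservesAxisAlignment() test above accepts and rejects
// (illustrative values, not from this file): quarter-turn rotations keep a
// rect's edges on the axes; arbitrary rotations do not, though they still
// preserve right angles.
#if 0  // illustrative only
    SkMatrix m;
    m.setScale(2.0f, 3.0f);
    SkASSERT(m.preservesAxisAlignment());    // scale/translate: OK
    m.setRotate(90.0f);
    SkASSERT(m.preservesAxisAlignment());    // quarter turns: still OK
    m.setRotate(45.0f);
    SkASSERT(!m.preservesAxisAlignment());   // rect edges now diagonal
    SkASSERT(m.preservesRightAngles());      // but right angles survive
#endif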

void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const SkStrokeRec* stroke,
                         const SkMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    SkScalar width = stroke == NULL ? -1 : stroke->getWidth();
    SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix.preConcat(*matrix);
    }

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (NULL != this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!combinedMatrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool useVertexCoverage;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
                                           &useVertexCoverage);
    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
                                          combinedMatrix, devBoundRect,
                                          stroke, useVertexCoverage);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, combinedMatrix, devBoundRect,
                                        useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect, matrix);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* dstMatrix,
                               const SkMatrix* localMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
}
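
// A sketch of the classic drawRectToRect use (assumed, not from this file):
// blitting a sub-rectangle of a texture into a destination rect, with the
// texture bound on the paint and localRect giving the source coordinates in
// the texture effect's local space. 'atlasTexture' is hypothetical.
#if 0  // illustrative only
    GrPaint paint;
    paint.addColorTextureEffect(atlasTexture, SkMatrix::I());
    SkRect dst   = SkRect::MakeXYWH(10, 10, 64, 64);            // where to draw
    SkRect local = SkRect::MakeXYWH(0.25f, 0.0f, 0.25f, 0.5f);  // which part of the atlas
    context->drawRectToRect(paint, dst, local, NULL, NULL);
#endif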

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                 kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint),   kLocalCoord_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
};

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
};

static void set_vertex_attributes(GrDrawState* drawState,
                                  const GrPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        *texOffset = sizeof(GrPoint);
        *colorOffset = 2*sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        *texOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

};
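
// The offsets above describe interleaved vertices. Written as plain structs
// (a sketch for illustration; these types are not declared anywhere in Skia):
#if 0  // illustrative only
    struct PosUVColorVertex {
        GrPoint fPos;     // offset 0
        GrPoint fUV;      // offset sizeof(GrPoint)
        GrColor fColor;   // offset 2 * sizeof(GrPoint)
    };
    struct PosColorVertex {
        GrPoint fPos;     // offset 0
        GrColor fColor;   // offset sizeof(GrPoint)
    };
#endif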

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GrDrawState* drawState = target->drawState();

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexSize = drawState->getVertexSize();
    if (sizeof(GrPoint) != vertexSize) {
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rect,
                          const SkStrokeRec& stroke) {
    if (rect.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    if (!fOvalRenderer->drawSimpleRRect(target, this, paint.isAntiAlias(), rect, stroke)) {
        SkPath path;
        path.addRRect(rect);
        this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const SkStrokeRec& stroke) {
    if (oval.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, stroke)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
    }
}
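
// A sketch of a per-vertex-color triangle drawn through drawVertices (assumed
// usage, not from this file); set_vertex_attributes above picks the
// position+color layout because texCoords is NULL.
#if 0  // illustrative only
    GrPoint positions[3];
    positions[0].set(0.0f,  0.0f);
    positions[1].set(50.0f, 0.0f);
    positions[2].set(25.0f, 40.0f);
    GrColor colors[3] = { GrColorPackRGBA(0xFF, 0, 0, 0xFF),
                          GrColorPackRGBA(0, 0xFF, 0, 0xFF),
                          GrColorPackRGBA(0, 0, 0xFF, 0xFF) };
    GrPaint paint;
    context->drawVertices(paint, kTriangles_GrPrimitiveType, 3,
                          positions, NULL /* texCoords */, colors,
                          NULL /* indices */, 0);
#endif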

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2],
                            bool* useVertexCoverage) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}
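
// A sketch of a path that passes the test above (assumed, for illustration): a
// uniform 5-unit "picture frame" built from two opposite-wound rects, which is
// the shape a stroked rect converts to.
#if 0  // illustrative only
    SkPath frame;
    frame.addRect(SkRect::MakeWH(100, 100), SkPath::kCW_Direction);
    frame.addRect(SkRect::MakeXYWH(5, 5, 90, 90), SkPath::kCCW_Direction);
    // isNestedRects() returns true; the margin is 5 on all four sides.
#endif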

void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {

    if (path.isEmpty()) {
        if (path.isInverseFillType()) {
            this->drawPaint(paint);
        }
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    GrDrawState* drawState = target->drawState();

    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && stroke.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, stroke)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
    }
}
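
// A sketch of a stroked-path draw (assumed usage, not from this file):
// SkStrokeRec carries the fill/stroke decision that drawPath and
// internalDrawPath below consult.
#if 0  // illustrative only
    SkPath path;
    path.moveTo(0, 0);
    path.quadTo(50, 0, 50, 50);

    SkStrokeRec stroke(SkStrokeRec::kFill_InitStyle);
    stroke.setStrokeStyle(2.0f /* width */, false /* strokeAndFill */);

    GrPaint paint;
    paint.setAntiAlias(true);
    context->drawPath(paint, path, stroke);
#endif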

void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const SkStrokeRec& origStroke) {
    SkASSERT(!path.isEmpty());

    // An assumption here is that the path renderer uses some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();


    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                        GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(origStroke);

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // It didn't work the 1st time, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fFlushToReduceCacheSize = false;
}

bool GrContext::writeTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
                                   uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
        if (NULL != texture->asRenderTarget()) {
            return this->writeRenderTargetPixels(texture->asRenderTarget(),
                                                 left, top, width, height,
                                                 config, buffer, rowBytes, flags);
        } else {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    return fGpu->writeTexturePixels(texture, left, top, width, height,
                                    config, buffer, rowBytes);
}

bool GrContext::readTexturePixels(GrTexture* texture,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, void* buffer, size_t rowBytes,
                                  uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->readRenderTargetPixels(target,
                                            left, top, width, height,
                                            config, buffer, rowBytes,
                                            flags);
    } else {
        // TODO: make this more efficient for cases where we're reading the entire
        //       texture, i.e., use GetTexImage() instead

        // create scratch rendertarget and read from that
        GrAutoScratchTexture ast;
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        ast.set(this, desc, kExact_ScratchTexMatch);
        GrTexture* dst = ast.texture();
        if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
            this->copyTexture(texture, target, NULL);
            return this->readRenderTargetPixels(target,
                                                left, top, width, height,
                                                config, buffer, rowBytes,
                                                flags);
        }

        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            bool unpremul,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_GrPixelConfig:
            if (unpremul) {
                *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            } else {
                *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            }
            return true;
        case kBGRA_8888_GrPixelConfig:
            if (unpremul) {
                *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            } else {
                *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            }
            return true;
        default:
            return false;
    }
}

// Returns a config with the byte positions of the R & B components swapped in
// relation to the input config. This should only be called with the result of
// grconfig_to_config8888 as it will fail for other configs.
SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
    switch (config8888) {
        case SkCanvas::kBGRA_Premul_Config8888:
            return SkCanvas::kRGBA_Premul_Config8888;
        case SkCanvas::kBGRA_Unpremul_Config8888:
            return SkCanvas::kRGBA_Unpremul_Config8888;
        case SkCanvas::kRGBA_Premul_Config8888:
            return SkCanvas::kBGRA_Premul_Config8888;
        case SkCanvas::kRGBA_Unpremul_Config8888:
            return SkCanvas::kBGRA_Unpremul_Config8888;
        default:
            GrCrash("Unexpected input");
            return SkCanvas::kBGRA_Unpremul_Config8888;
    }
}
}

bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down.
    // We'll either do the flipY by drawing into a scratch with a matrix or on the cpu after the
    // read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancel out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for the two 8888 configs.
        return false;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (texture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrEffectRef> effect;
            if (unpremul) {
                effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (NULL != effect) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform
            // then there is no longer any point to using the scratch.
            if (NULL != effect || flipY || swapRAndB) {
                if (!effect) {
                    effect.reset(GrConfigConversionEffect::Create(
                                                    src,
                                                    swapRAndB,
                                                    GrConfigConversionEffect::kNone_PMConversion,
                                                    textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
                GrDrawState* drawState = fGpu->drawState();
                SkASSERT(effect);
                drawState->addColorEffect(effect);

                drawState->setRenderTarget(texture->asRenderTarget());
                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                fGpu->drawSimpleRect(rect, NULL);
                // we want to read back from the scratch's origin
                left = 0;
                top = 0;
                target = texture->asRenderTarget();
            }
        }
    }
    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        // These are initialized to suppress a warning
        SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
        SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;

        SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
        grconfig_to_config8888(dstConfig, unpremul, &dstC8888);

        if (swapRAndB) {
            SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs
            srcC8888 = swap_config8888_red_and_blue(srcC8888);
        }
        SkASSERT(c8888IsValid);
        uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                  b32, rowBytes, srcC8888,
                                  width, height);
    }
    return true;
}

void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}
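
// A sketch of a straight-alpha read back (assumed usage, not from this file):
// kUnpremul_PixelOpsFlag asks the code above to undo premultiplication, either
// in the scratch-texture draw or on the CPU afterwards.
#if 0  // illustrative only
    SkAutoMalloc storage(width * height * 4);
    bool ok = context->readRenderTargetPixels(renderTarget,
                                              0, 0, width, height,
                                              kRGBA_8888_GrPixelConfig,
                                              storage.get(),
                                              width * 4 /* rowBytes */,
                                              GrContext::kUnpremul_PixelOpsFlag);
#endif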

void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    // Writes pending to the source texture are not tracked, so a flush
    // is required to ensure that the copy captures the most recent contents
    // of the source texture. See similar behavior in
    // GrContext::resolveRenderTarget.
    this->flush();

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    SkMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
    if (NULL != topLeft) {
        srcRect.offset(*topLeft);
    }
    SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
    if (!srcRect.intersect(srcBounds)) {
        return;
    }
    sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
    drawState->addColorTextureEffect(src, sampleM);
    SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
    fGpu->drawSimpleRect(dstR, NULL);
}
1.1504 +
1.1505 +bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1.1506 +                                        int left, int top, int width, int height,
1.1507 +                                        GrPixelConfig srcConfig,
1.1508 +                                        const void* buffer,
1.1509 +                                        size_t rowBytes,
1.1510 +                                        uint32_t flags) {
1.1511 +    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
1.1512 +    ASSERT_OWNED_RESOURCE(target);
1.1513 +
1.1514 +    if (NULL == target) {
1.1515 +        target = fRenderTarget.get();
1.1516 +        if (NULL == target) {
1.1517 +            return false;
1.1518 +        }
1.1519 +    }
1.1520 +
1.1521 +    // TODO: when the underlying API has a direct way to do this we should use it
1.1522 +    // (e.g. glDrawPixels on desktop GL).
1.1523 +
1.1524 +    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1.1525 +    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
1.1526 +    // set).
1.1527 +
1.1528 +    // If the RT is also a texture and we don't have to premultiply then take the texture path.
1.1529 +    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
1.1530 +    // we do below.
1.1531 +
1.1532 +#if !defined(SK_BUILD_FOR_MAC)
1.1533 +    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1.1534 +    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
1.1535 +    // HW is affected.
1.1536 +    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
1.1537 +        fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
1.1538 +        return this->writeTexturePixels(target->asTexture(),
1.1539 +                                        left, top, width, height,
1.1540 +                                        srcConfig, buffer, rowBytes, flags);
1.1541 +    }
1.1542 +#endif
1.1543 +
1.1544 +    // We ignore the preferred config unless it is an R/B swap of the src config. In that case
1.1545 +    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1.1546 +    // config. This scratch will then have R and B swapped. We correct for this by swapping again
1.1547 +    // when drawing the scratch to the dst using a conversion effect.
1.1548 +    bool swapRAndB = false;
1.1549 +    GrPixelConfig writeConfig = srcConfig;
1.1550 +    if (GrPixelConfigSwapRAndB(srcConfig) ==
1.1551 +        fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
1.1552 +        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1.1553 +        swapRAndB = true;
1.1554 +    }
1.1555 +
1.1556 +    GrTextureDesc desc;
1.1557 +    desc.fWidth = width;
1.1558 +    desc.fHeight = height;
1.1559 +    desc.fConfig = writeConfig;
1.1560 +    GrAutoScratchTexture ast(this, desc);
1.1561 +    GrTexture* texture = ast.texture();
1.1562 +    if (NULL == texture) {
1.1563 +        return false;
1.1564 +    }
1.1565 +
1.1566 +    SkAutoTUnref<const GrEffectRef> effect;
1.1567 +    SkMatrix textureMatrix;
1.1568 +    textureMatrix.setIDiv(texture->width(), texture->height());
1.1569 +
1.1570 +    // allocate a tmp buffer and sw convert the pixels to premul
1.1571 +    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1.1572 +
1.1573 +    if (kUnpremul_PixelOpsFlag & flags) {
1.1574 +        if (!GrPixelConfigIs8888(srcConfig)) {
1.1575 +            return false;
1.1576 +        }
1.1577 +        effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1.1578 +        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1.1579 +        if (NULL == effect) {
1.1580 +            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
1.1581 +            SkDEBUGCODE(bool success = )
1.1582 +                grconfig_to_config8888(srcConfig, true, &srcConfig8888);
1.1583 +            SkASSERT(success);
1.1584 +            SkDEBUGCODE(success = )
1.1585 +                grconfig_to_config8888(srcConfig, false, &dstConfig8888);
1.1586 +            SkASSERT(success);
1.1587 +            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
1.1588 +            tmpPixels.reset(width * height);
1.1589 +            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
1.1590 +                                      src, rowBytes, srcConfig8888,
1.1591 +                                      width, height);
1.1592 +            buffer = tmpPixels.get();
1.1593 +            rowBytes = 4 * width;
1.1594 +        }
1.1595 +    }
1.1596 +    if (NULL == effect) {
1.1597 +        effect.reset(GrConfigConversionEffect::Create(texture,
1.1598 +                                                      swapRAndB,
1.1599 +                                                      GrConfigConversionEffect::kNone_PMConversion,
1.1600 +                                                      textureMatrix));
1.1601 +    }
1.1602 +
1.1603 +    if (!this->writeTexturePixels(texture,
1.1604 +                                  0, 0, width, height,
1.1605 +                                  writeConfig, buffer, rowBytes,
1.1606 +                                  flags & ~kUnpremul_PixelOpsFlag)) {
1.1607 +        return false;
1.1608 +    }
1.1609 +
1.1610 +    // writeRenderTargetPixels can be called in the midst of drawing another
1.1611 +    // object (e.g., when uploading a SW path rendering to the gpu while
1.1612 +    // drawing a rect) so preserve the current geometry.
1.1613 +    SkMatrix matrix;
1.1614 +    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1.1615 +    GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
1.1616 +    GrDrawState* drawState = fGpu->drawState();
1.1617 +    SkASSERT(effect);
1.1618 +    drawState->addColorEffect(effect);
1.1619 +
1.1620 +    drawState->setRenderTarget(target);
1.1621 +
1.1622 +    fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
1.1623 +    return true;
1.1624 +}
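// A representative call into the path above: uploading a tightly packed,
// unpremultiplied 8888 buffer at the target's origin. A minimal sketch; the
// helper name and the choice of kSkia8888_GrPixelConfig are illustrative:

static bool illustrative_upload_unpremul(GrContext* ctx, GrRenderTarget* rt,
                                         const uint32_t* pixels, int w, int h) {
    return ctx->writeRenderTargetPixels(rt, 0, 0, w, h,
                                        kSkia8888_GrPixelConfig, pixels,
                                        4 * w /* tightly packed rowBytes */,
                                        GrContext::kUnpremul_PixelOpsFlag);
}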
1.1625 +////////////////////////////////////////////////////////////////////////////////
1.1626 +
1.1627 +GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1.1628 +                                       BufferedDraw buffered,
1.1629 +                                       AutoRestoreEffects* are,
1.1630 +                                       AutoCheckFlush* acf) {
1.1631 +    // All users of this draw state should be freeing up all effects when they're done.
1.1632 +    // Otherwise effects that own resources may keep those resources alive indefinitely.
1.1633 +    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
1.1634 +
1.1635 +    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1.1636 +        fDrawBuffer->flush();
1.1637 +        fLastDrawWasBuffered = kNo_BufferedDraw;
1.1638 +    }
1.1639 +    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1.1640 +    if (NULL != paint) {
1.1641 +        SkASSERT(NULL != are);
1.1642 +        SkASSERT(NULL != acf);
1.1643 +        are->set(fDrawState);
1.1644 +        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1.1645 +#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1.1646 +        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1.1647 +            !fGpu->canApplyCoverage()) {
1.1648 +            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1.1649 +        }
1.1650 +#endif
1.1651 +    } else {
1.1652 +        fDrawState->reset(fViewMatrix);
1.1653 +        fDrawState->setRenderTarget(fRenderTarget.get());
1.1654 +    }
1.1655 +    GrDrawTarget* target;
1.1656 +    if (kYes_BufferedDraw == buffered) {
1.1657 +        fLastDrawWasBuffered = kYes_BufferedDraw;
1.1658 +        target = fDrawBuffer;
1.1659 +    } else {
1.1660 +        SkASSERT(kNo_BufferedDraw == buffered);
1.1661 +        fLastDrawWasBuffered = kNo_BufferedDraw;
1.1662 +        target = fGpu;
1.1663 +    }
1.1664 +    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
1.1665 +                                                      !fClip->fClipStack->isWideOpen());
1.1666 +    target->setClip(fClip);
1.1667 +    SkASSERT(fDrawState == target->drawState());
1.1668 +    return target;
1.1669 +}
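// Ordering invariant maintained by prepareToDraw: if the previous draw went
// through fDrawBuffer but the next one targets fGpu directly, the buffer is
// flushed first so the GPU observes draws in issue order. Summarized:
//
//   last draw   | requested draw | action before returning target
//   ------------+----------------+--------------------------------
//   buffered    | unbuffered     | fDrawBuffer->flush()
//   buffered    | buffered       | none
//   unbuffered  | either         | none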
1.1670 +
1.1671 +/*
1.1672 + * This method finds a path renderer that can draw the specified path on
1.1673 + * the provided target.
1.1674 + * Due to its expense, the software path renderer has been split out so that it
1.1675 + * can be individually allowed/disallowed via the "allowSW" boolean.
1.1676 + */
1.1677 +GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1.1678 +                                           const SkStrokeRec& stroke,
1.1679 +                                           const GrDrawTarget* target,
1.1680 +                                           bool allowSW,
1.1681 +                                           GrPathRendererChain::DrawType drawType,
1.1682 +                                           GrPathRendererChain::StencilSupport* stencilSupport) {
1.1683 +
1.1684 +    if (NULL == fPathRendererChain) {
1.1685 +        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1.1686 +    }
1.1687 +
1.1688 +    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1.1689 +                                                             stroke,
1.1690 +                                                             target,
1.1691 +                                                             drawType,
1.1692 +                                                             stencilSupport);
1.1693 +
1.1694 +    if (NULL == pr && allowSW) {
1.1695 +        if (NULL == fSoftwarePathRenderer) {
1.1696 +            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1.1697 +        }
1.1698 +        pr = fSoftwarePathRenderer;
1.1699 +    }
1.1700 +
1.1701 +    return pr;
1.1702 +}
1.1703 +
1.1704 +////////////////////////////////////////////////////////////////////////////////
1.1705 +bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1.1706 +    return fGpu->caps()->isConfigRenderable(config, withMSAA);
1.1707 +}
1.1708 +
1.1709 +int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1.1710 +                                         SkScalar dpi) const {
1.1711 +    if (!this->isConfigRenderable(config, true)) {
1.1712 +        return 0;
1.1713 +    }
1.1714 +    int chosenSampleCount = 0;
1.1715 +    if (fGpu->caps()->pathRenderingSupport()) {
1.1716 +        if (dpi >= 250.0f) {
1.1717 +            chosenSampleCount = 4;
1.1718 +        } else {
1.1719 +            chosenSampleCount = 16;
1.1720 +        }
1.1721 +    }
1.1722 +    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1.1723 +        chosenSampleCount : 0;
1.1724 +}
1.1725 +
1.1726 +void GrContext::setupDrawBuffer() {
1.1727 +    SkASSERT(NULL == fDrawBuffer);
1.1728 +    SkASSERT(NULL == fDrawBufferVBAllocPool);
1.1729 +    SkASSERT(NULL == fDrawBufferIBAllocPool);
1.1730 +
1.1731 +    fDrawBufferVBAllocPool =
1.1732 +        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1.1733 +                                             DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1.1734 +                                             DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1.1735 +    fDrawBufferIBAllocPool =
1.1736 +        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1.1737 +                                            DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1.1738 +                                            DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1.1739 +
1.1740 +    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1.1741 +                                                   fDrawBufferVBAllocPool,
1.1742 +                                                   fDrawBufferIBAllocPool));
1.1743 +
1.1744 +    fDrawBuffer->setDrawState(fDrawState);
1.1745 +}
1.1746 +
1.1747 +GrDrawTarget* GrContext::getTextTarget() {
1.1748 +    return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1.1749 +}
1.1750 +
1.1751 +const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1.1752 +    return fGpu->getQuadIndexBuffer();
1.1753 +}
1.1754 +
1.1755 +namespace {
1.1756 +void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1.1757 +    GrConfigConversionEffect::PMConversion pmToUPM;
1.1758 +    GrConfigConversionEffect::PMConversion upmToPM;
1.1759 +    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1.1760 +    *pmToUPMValue = pmToUPM;
1.1761 +    *upmToPMValue = upmToPM;
1.1762 +}
1.1763 +}
1.1764 +
1.1765 +const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
1.1766 +                                                  bool swapRAndB,
1.1767 +                                                  const SkMatrix& matrix) {
1.1768 +    if (!fDidTestPMConversions) {
1.1769 +        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1.1770 +        fDidTestPMConversions = true;
1.1771 +    }
1.1772 +    GrConfigConversionEffect::PMConversion pmToUPM =
1.1773 +        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1.1774 +    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1.1775 +        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1.1776 +    } else {
1.1777 +        return NULL;
1.1778 +    }
1.1779 +}
1.1780 +
1.1781 +const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
1.1782 +                                                  bool swapRAndB,
1.1783 +                                                  const SkMatrix& matrix) {
1.1784 +    if (!fDidTestPMConversions) {
1.1785 +        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1.1786 +        fDidTestPMConversions = true;
1.1787 +    }
1.1788 +    GrConfigConversionEffect::PMConversion upmToPM =
1.1789 +        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1.1790 +    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1.1791 +        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1.1792 +    } else {
1.1793 +        return NULL;
1.1794 +    }
1.1795 +}
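// The conversion test run once above searches for a (PM->UPM, UPM->PM) pair
// that round-trips every pixel exactly; only then are the cached conversions
// worth using. A CPU-side sketch of that acceptance criterion (the real test
// in GrConfigConversionEffect renders through the effects; the function
// pointers here are illustrative stand-ins):

static bool illustrative_round_trips(uint32_t (*pmToUPM)(uint32_t),
                                     uint32_t (*upmToPM)(uint32_t),
                                     const uint32_t* samples, int count) {
    for (int i = 0; i < count; ++i) {
        if (upmToPM(pmToUPM(samples[i])) != samples[i]) {
            return false;   // lossy pair: reject, fall back to CPU conversion
        }
    }
    return true;
}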
1.1796 +
1.1797 +GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
1.1798 +    SkASSERT(fGpu->caps()->pathRenderingSupport());
1.1799 +
1.1800 +    // TODO: now we add to fTextureCache. This should change to fResourceCache.
1.1801 +    GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
1.1802 +    GrPath* path = static_cast<GrPath*>(fTextureCache->find(resourceKey));
1.1803 +    if (NULL != path && path->isEqualTo(inPath, stroke)) {
1.1804 +        path->ref();
1.1805 +    } else {
1.1806 +        path = fGpu->createPath(inPath, stroke);
1.1807 +        fTextureCache->purgeAsNeeded(1, path->sizeInBytes());
1.1808 +        fTextureCache->addResource(resourceKey, path);
1.1809 +    }
1.1810 +    return path;
1.1811 +}
1.1812 +
1.1813 +///////////////////////////////////////////////////////////////////////////////
1.1814 +#if GR_CACHE_STATS
1.1815 +void GrContext::printCacheStats() const {
1.1816 +    fTextureCache->printStats();
1.1817 +}
1.1818 +#endif
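// Note on the isEqualTo() check in createPath above: distinct (path, stroke)
// pairs can produce the same GrResourceKey, so a cache hit is trusted only
// after verifying the stored path actually matches the inputs. On a mismatch
// a fresh GrPath is created and added to the cache under the same key.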