/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "effects/GrSingleTextureEffect.h"
#include "effects/GrConfigConversionEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTrace.h"

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContext* context = SkNEW(GrContext);
    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

GrContext::GrContext() {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fTextureCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_RESOURCE_CACHE_COUNT,
                                MAX_RESOURCE_CACHE_BYTES));
    fTextureCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}

GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    fTextureCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}

void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}

void GrContext::contextDestroyed() {
    // abandon first so destructors don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fGpu->markContextDirty();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

size_t GrContext::getGpuTextureCacheBytes() const {
    return fTextureCache->getCachedResourceBytes();
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    GrResource* resource = fTextureCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    return fTextureCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fTextureCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrResource* resource = fTextureCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         size_t bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    GrFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
                                           const GrCacheID& cacheID,
                                           void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrTextureFlagBit |
                    kNoStencil_GrTextureFlagBit;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (NULL != texture) {
        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode
                                      : GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));

        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);

        if (arg.succeeded()) {
            GrPoint* verts = (GrPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures when the texture params are either filtered or
        // not. Either implement filtered stretch blit on CPU or just create
        // one when FBO case fails.

        rtDesc.fFlags = kNone_GrTextureFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);
        size_t bpp = GrBytesPerPixel(desc.fConfig);
        SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                     srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
                                                              stretchedRowBytes);
        SkASSERT(NULL != texture);
    }

    return texture;
}

GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    SK_TRACE_EVENT0("GrContext::createTexture");

    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexture::NeedsResizing(resourceKey)) {
        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexture::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fTextureCache->purgeAsNeeded(1, texture->sizeInBytes());
        fTextureCache->addResource(resourceKey, texture);

        if (NULL != cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

static GrTexture* create_scratch_texture(GrGpu* gpu,
                                         GrResourceCache* textureCache,
                                         const GrTextureDesc& desc) {
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    if (NULL != texture) {
        GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        textureCache->purgeAsNeeded(1, texture->sizeInBytes());
        // Make the resource exclusive so future 'find' calls don't return it
        textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    }
    return texture;
}

GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures() &&
        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        // If we're never recycling this texture we can always make it the right size
        return create_scratch_texture(fGpu, fTextureCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTexture::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fTextureCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}

void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else if (texture->getDeferredRefCount() <= 0) {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        fTextureCache->deleteResource(texture->getCacheEntry());
    } else {
        // In this case (fDeferredRefCount > 0) the cache is the only
        // one holding a real ref. Mark the object so when the deferred
        // ref count goes to 0 the texture will be deleted (remember
        // in this code path scratch textures aren't getting reused).
        texture->setNeedsDeferredUnref();
    }
}


void GrContext::unlockScratchTexture(GrTexture* texture) {
    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            fTextureCache->deleteResource(texture->getCacheEntry());
        } else {
            // In this case (fRefCnt > 1 || defRefCnt > 0) we don't really
            // want to re-add it to the cache (since it will never be reused).
            // Instead, give up the cache's ref and leave the decision up to
            // addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}

void GrContext::purgeCache() {
    if (NULL != fTextureCache) {
        fTextureCache->purgeAsNeeded();
    }
}

bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(NULL != data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}


GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return GrMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->eightBitPaletteSupport()) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}


////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* target) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
                                                                canIgnoreRect, target);
}

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

/* create a triangle strip that strokes the specified rect. There are 8
   unique vertices, but we repeat the last 2 to close up. Alternatively we
   could use an indices array, and then only send 8 verts, but not sure that
   would be faster.
*/
static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static bool isIRect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    if (0 == strokeWidth && target->willUseHWAALines()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);

    if (strokeWidth < 0) {
        return !isIRect(*devBoundRect);
    } else {
        return true;
    }
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const SkStrokeRec* stroke,
                         const SkMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    SkScalar width = stroke == NULL ? -1 : stroke->getWidth();
    SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix.preConcat(*matrix);
    }

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (NULL != this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!combinedMatrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool useVertexCoverage;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
                                           &useVertexCoverage);
    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
                                          combinedMatrix, devBoundRect,
                                          stroke, useVertexCoverage);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, combinedMatrix, devBoundRect,
                                        useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect, matrix);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* dstMatrix,
                               const SkMatrix* localMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
}

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                 kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint),   kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
};

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
};

static void set_vertex_attributes(GrDrawState* drawState,
                                  const GrPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        *texOffset = sizeof(GrPoint);
        *colorOffset = 2*sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        *texOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

};

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GrDrawState* drawState = target->drawState();

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexSize = drawState->getVertexSize();
    if (sizeof(GrPoint) != vertexSize) {
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rect,
                          const SkStrokeRec& stroke) {
    if (rect.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    if (!fOvalRenderer->drawSimpleRRect(target, this, paint.isAntiAlias(), rect, stroke)) {
        SkPath path;
        path.addRRect(rect);
        this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const SkStrokeRec& stroke) {
    if (oval.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, stroke)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
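// (Illustrative note, not from the original source: one way such a path arises
// is a rect stroked with a wide stroke and converted to fill geometry, which
// gives an outer and an inner rect wound in opposite directions with a uniform
// margin between them.)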
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2],
                            bool* useVertexCoverage) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}

void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {

    if (path.isEmpty()) {
        if (path.isInverseFillType()) {
            this->drawPaint(paint);
        }
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    GrDrawState* drawState = target->drawState();

    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && stroke.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, stroke)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
    }
}

void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const SkStrokeRec& origStroke) {
    SkASSERT(!path.isEmpty());

    // An assumption here is that the path renderer will use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();


    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                        GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(origStroke);

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // It didn't work the 1st time, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fFlushToReduceCacheSize = false;
}

bool GrContext::writeTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
                                   uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
        if (NULL != texture->asRenderTarget()) {
            return this->writeRenderTargetPixels(texture->asRenderTarget(),
                                                 left, top, width, height,
                                                 config, buffer, rowBytes, flags);
        } else {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    return fGpu->writeTexturePixels(texture, left, top, width, height,
                                    config, buffer, rowBytes);
}

bool GrContext::readTexturePixels(GrTexture* texture,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, void* buffer, size_t rowBytes,
                                  uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->readRenderTargetPixels(target,
                                            left, top, width, height,
                                            config, buffer, rowBytes,
                                            flags);
    } else {
        // TODO: make this more efficient for cases where we're reading the entire
        //       texture, i.e., use GetTexImage() instead

        // create scratch rendertarget and read from that
        GrAutoScratchTexture ast;
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        ast.set(this, desc, kExact_ScratchTexMatch);
        GrTexture* dst = ast.texture();
        if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
            this->copyTexture(texture, target, NULL);
            return this->readRenderTargetPixels(target,
                                                left, top, width, height,
                                                config, buffer, rowBytes,
                                                flags);
        }

        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            bool unpremul,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_GrPixelConfig:
            if (unpremul) {
                *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            } else {
                *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            }
            return true;
        case kBGRA_8888_GrPixelConfig:
            if (unpremul) {
                *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            } else {
                *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            }
            return true;
        default:
            return false;
    }
}

// Returns a config where the byte positions of the R & B components are swapped
// relative to the input config. This should only be called with the result of
// grconfig_to_config8888 as it will fail for other configs.
SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
    switch (config8888) {
        case SkCanvas::kBGRA_Premul_Config8888:
            return SkCanvas::kRGBA_Premul_Config8888;
        case SkCanvas::kBGRA_Unpremul_Config8888:
            return SkCanvas::kRGBA_Unpremul_Config8888;
        case SkCanvas::kRGBA_Premul_Config8888:
            return SkCanvas::kBGRA_Premul_Config8888;
        case SkCanvas::kRGBA_Unpremul_Config8888:
            return SkCanvas::kBGRA_Unpremul_Config8888;
        default:
            GrCrash("Unexpected input");
            return SkCanvas::kBGRA_Unpremul_Config8888;
    }
}
}

bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
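    // (Editorial summary, hedged against the code below: where possible these
    // conversions are folded into a single draw to a scratch render target;
    // whatever can't be handled there, e.g. unpremul when no PM->UPM effect is
    // available, falls back to a CPU pass after the readPixels call.)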

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancel out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
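        // (For example, assuming the pow2 binning done in lockAndRefScratchTexture
        // above, a 300x200 approx-match request can reuse a 512x256 scratch.)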
michael@0: ScratchTexMatch match = kApprox_ScratchTexMatch; michael@0: if (0 == left && michael@0: 0 == top && michael@0: target->width() == width && michael@0: target->height() == height && michael@0: fGpu->fullReadPixelsIsFasterThanPartial()) { michael@0: match = kExact_ScratchTexMatch; michael@0: } michael@0: ast.set(this, desc, match); michael@0: GrTexture* texture = ast.texture(); michael@0: if (texture) { michael@0: // compute a matrix to perform the draw michael@0: SkMatrix textureMatrix; michael@0: textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); michael@0: textureMatrix.postIDiv(src->width(), src->height()); michael@0: michael@0: SkAutoTUnref effect; michael@0: if (unpremul) { michael@0: effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix)); michael@0: if (NULL != effect) { michael@0: unpremul = false; // we no longer need to do this on CPU after the read back. michael@0: } michael@0: } michael@0: // If we failed to create a PM->UPM effect and have no other conversions to perform then michael@0: // there is no longer any point to using the scratch. michael@0: if (NULL != effect || flipY || swapRAndB) { michael@0: if (!effect) { michael@0: effect.reset(GrConfigConversionEffect::Create( michael@0: src, michael@0: swapRAndB, michael@0: GrConfigConversionEffect::kNone_PMConversion, michael@0: textureMatrix)); michael@0: } michael@0: swapRAndB = false; // we will handle the swap in the draw. michael@0: michael@0: // We protect the existing geometry here since it may not be michael@0: // clear to the caller that a draw operation (i.e., drawSimpleRect) michael@0: // can be invoked in this method michael@0: GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit); michael@0: GrDrawState* drawState = fGpu->drawState(); michael@0: SkASSERT(effect); michael@0: drawState->addColorEffect(effect); michael@0: michael@0: drawState->setRenderTarget(texture->asRenderTarget()); michael@0: SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)); michael@0: fGpu->drawSimpleRect(rect, NULL); michael@0: // we want to read back from the scratch's origin michael@0: left = 0; michael@0: top = 0; michael@0: target = texture->asRenderTarget(); michael@0: } michael@0: } michael@0: } michael@0: if (!fGpu->readPixels(target, michael@0: left, top, width, height, michael@0: readConfig, buffer, rowBytes)) { michael@0: return false; michael@0: } michael@0: // Perform any conversions we weren't able to perform using a scratch texture. 
    if (unpremul || swapRAndB) {
        // These are initialized to suppress a warning.
        SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
        SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;

        SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
        grconfig_to_config8888(dstConfig, unpremul, &dstC8888);

        if (swapRAndB) {
            SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs
            srcC8888 = swap_config8888_red_and_blue(srcC8888);
        }
        SkASSERT(c8888IsValid);
        uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                  b32, rowBytes, srcC8888,
                                  width, height);
    }
    return true;
}

void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}

void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    // Writes pending to the source texture are not tracked, so a flush
    // is required to ensure that the copy captures the most recent contents
    // of the source texture. See similar behavior in
    // GrContext::resolveRenderTarget.
    this->flush();

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    SkMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
    if (NULL != topLeft) {
        srcRect.offset(*topLeft);
    }
    SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
    if (!srcRect.intersect(srcBounds)) {
        return;
    }
    sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
    drawState->addColorTextureEffect(src, sampleM);
    SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
    fGpu->drawSimpleRect(dstR, NULL);
}

bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
                                        int left, int top, int width, int height,
                                        GrPixelConfig srcConfig,
                                        const void* buffer,
                                        size_t rowBytes,
                                        uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    // TODO: when the underlying API has a direct way to do this we should use it (e.g.
    // glDrawPixels on desktop GL).
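    // The plan below: upload the caller's pixels to a scratch texture with writeTexturePixels and
    // then draw that scratch into the target, letting a GrConfigConversionEffect undo any premul
    // or R/B spoofing that was applied on the way in.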

    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
    // set).

    // If the RT is also a texture and we don't have to premultiply then take the texture path.
    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
    // we do below.

#if !defined(SK_BUILD_FOR_MAC)
    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions
    // and/or HW is affected.
    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
        fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
        return this->writeTexturePixels(target->asTexture(),
                                        left, top, width, height,
                                        srcConfig, buffer, rowBytes, flags);
    }
#endif

    // We ignore the preferred config unless it is an R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the
    // swapped config. This scratch will then have R and B swapped. We correct for this by
    // swapping again when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return false;
    }

    SkAutoTUnref<const GrEffectRef> effect;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // Allocate a tmp buffer and sw convert the pixels to premul.
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & flags) {
        if (!GrPixelConfigIs8888(srcConfig)) {
            return false;
        }
        effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // Handle the unpremul step on the CPU if we couldn't create an effect to do it.
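        // The CPU fallback below premultiplies into tmpPixels (tightly packed at 4 * width bytes
        // per row) and then points 'buffer' and 'rowBytes' at that temporary copy.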
        if (NULL == effect) {
            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
            SkDEBUGCODE(bool success = )
                grconfig_to_config8888(srcConfig, true, &srcConfig8888);
            SkASSERT(success);
            SkDEBUGCODE(success = )
                grconfig_to_config8888(srcConfig, false, &dstConfig8888);
            SkASSERT(success);
            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
            tmpPixels.reset(width * height);
            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                      src, rowBytes, srcConfig8888,
                                      width, height);
            buffer = tmpPixels.get();
            rowBytes = 4 * width;
        }
    }
    if (NULL == effect) {
        effect.reset(GrConfigConversionEffect::Create(texture,
                                                      swapRAndB,
                                                      GrConfigConversionEffect::kNone_PMConversion,
                                                      textureMatrix));
    }

    if (!this->writeTexturePixels(texture,
                                  0, 0, width, height,
                                  writeConfig, buffer, rowBytes,
                                  flags & ~kUnpremul_PixelOpsFlag)) {
        return false;
    }

    // writeRenderTargetPixels can be called in the midst of drawing another
    // object (e.g., when uploading a SW path rendering to the gpu while
    // drawing a rect) so preserve the current geometry.
    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
    GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
    GrDrawState* drawState = fGpu->drawState();
    SkASSERT(effect);
    drawState->addColorEffect(effect);

    drawState->setRenderTarget(target);

    fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
    return true;
}

////////////////////////////////////////////////////////////////////////////////

GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
                                       BufferedDraw buffered,
                                       AutoRestoreEffects* are,
                                       AutoCheckFlush* acf) {
    // All users of this draw state should be freeing up all effects when they're done.
    // Otherwise effects that own resources may keep those resources alive indefinitely.
    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());

    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
        fDrawBuffer->flush();
        fLastDrawWasBuffered = kNo_BufferedDraw;
    }
    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
    if (NULL != paint) {
        SkASSERT(NULL != are);
        SkASSERT(NULL != acf);
        are->set(fDrawState);
        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
            !fGpu->canApplyCoverage()) {
            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
        }
#endif
    } else {
        fDrawState->reset(fViewMatrix);
        fDrawState->setRenderTarget(fRenderTarget.get());
    }
    GrDrawTarget* target;
    if (kYes_BufferedDraw == buffered) {
        fLastDrawWasBuffered = kYes_BufferedDraw;
        target = fDrawBuffer;
    } else {
        SkASSERT(kNo_BufferedDraw == buffered);
        fLastDrawWasBuffered = kNo_BufferedDraw;
        target = fGpu;
    }
    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
                                                      !fClip->fClipStack->isWideOpen());
    target->setClip(fClip);
    SkASSERT(fDrawState == target->drawState());
    return target;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so that it
 * can be individually allowed/disallowed via the "allowSW" boolean.
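 * If no renderer in the chain can handle the path and "allowSW" is true, the lazily created
 * software path renderer is returned as a fallback.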
 */
GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
                                           const SkStrokeRec& stroke,
                                           const GrDrawTarget* target,
                                           bool allowSW,
                                           GrPathRendererChain::DrawType drawType,
                                           GrPathRendererChain::StencilSupport* stencilSupport) {

    if (NULL == fPathRendererChain) {
        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
                                                             stroke,
                                                             target,
                                                             drawType,
                                                             stencilSupport);

    if (NULL == pr && allowSW) {
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
        }
        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////
bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
    return fGpu->caps()->isConfigRenderable(config, withMSAA);
}

int GrContext::getRecommendedSampleCount(GrPixelConfig config,
                                         SkScalar dpi) const {
    if (!this->isConfigRenderable(config, true)) {
        return 0;
    }
    int chosenSampleCount = 0;
    if (fGpu->caps()->pathRenderingSupport()) {
        if (dpi >= 250.0f) {
            chosenSampleCount = 4;
        } else {
            chosenSampleCount = 16;
        }
    }
    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
        chosenSampleCount : 0;
}

void GrContext::setupDrawBuffer() {
    SkASSERT(NULL == fDrawBuffer);
    SkASSERT(NULL == fDrawBufferVBAllocPool);
    SkASSERT(NULL == fDrawBufferIBAllocPool);

    fDrawBufferVBAllocPool =
        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
                                             DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                             DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
    fDrawBufferIBAllocPool =
        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
                                            DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                            DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));

    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
                                                   fDrawBufferVBAllocPool,
                                                   fDrawBufferIBAllocPool));

    fDrawBuffer->setDrawState(fDrawState);
}

GrDrawTarget* GrContext::getTextTarget() {
    return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
}

const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}

namespace {
void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
    GrConfigConversionEffect::PMConversion pmToUPM;
    GrConfigConversionEffect::PMConversion upmToPM;
    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
    *pmToUPMValue = pmToUPM;
    *upmToPMValue = upmToPM;
}
}

const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
                                                  bool swapRAndB,
                                                  const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this,
&fPMToUPMConversion, &fUPMToPMConversion); michael@0: fDidTestPMConversions = true; michael@0: } michael@0: GrConfigConversionEffect::PMConversion pmToUPM = michael@0: static_cast(fPMToUPMConversion); michael@0: if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) { michael@0: return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix); michael@0: } else { michael@0: return NULL; michael@0: } michael@0: } michael@0: michael@0: const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture, michael@0: bool swapRAndB, michael@0: const SkMatrix& matrix) { michael@0: if (!fDidTestPMConversions) { michael@0: test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion); michael@0: fDidTestPMConversions = true; michael@0: } michael@0: GrConfigConversionEffect::PMConversion upmToPM = michael@0: static_cast(fUPMToPMConversion); michael@0: if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) { michael@0: return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix); michael@0: } else { michael@0: return NULL; michael@0: } michael@0: } michael@0: michael@0: GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) { michael@0: SkASSERT(fGpu->caps()->pathRenderingSupport()); michael@0: michael@0: // TODO: now we add to fTextureCache. This should change to fResourceCache. michael@0: GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke); michael@0: GrPath* path = static_cast(fTextureCache->find(resourceKey)); michael@0: if (NULL != path && path->isEqualTo(inPath, stroke)) { michael@0: path->ref(); michael@0: } else { michael@0: path = fGpu->createPath(inPath, stroke); michael@0: fTextureCache->purgeAsNeeded(1, path->sizeInBytes()); michael@0: fTextureCache->addResource(resourceKey, path); michael@0: } michael@0: return path; michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: #if GR_CACHE_STATS michael@0: void GrContext::printCacheStats() const { michael@0: fTextureCache->printStats(); michael@0: } michael@0: #endif