/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrPath.h"
#include "GrPoint.h"
#include "GrRenderTarget.h"
#include "GrTemplates.h"
#include "GrTexture.h"
#include "GrVertexBuffer.h"

GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(NULL != vertexPool);
    SkASSERT(NULL != indexPool);

    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
    // This must be called before the GrDrawTarget destructor runs.
    this->releaseGeometry();
    fDstGpu->unref();
}

////////////////////////////////////////////////////////////////////////////////

namespace {
void get_vertex_bounds(const void* vertices,
                       size_t vertexSize,
                       int vertexCount,
                       SkRect* bounds) {
    SkASSERT(vertexSize >= sizeof(GrPoint));
    SkASSERT(vertexCount > 0);
    const GrPoint* point = static_cast<const GrPoint*>(vertices);
    bounds->fLeft = bounds->fRight = point->fX;
    bounds->fTop = bounds->fBottom = point->fY;
    for (int i = 1; i < vertexCount; ++i) {
        point = reinterpret_cast<const GrPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
        bounds->growToInclude(point->fX, point->fY);
    }
}
}

namespace {

extern const GrVertexAttrib kRectPosColorUVAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint),                 kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint)+sizeof(GrColor), kLocalCoord_GrVertexAttribBinding},
};

extern const GrVertexAttrib kRectPosUVAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding},
};

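// Illustrative memory layout for the fully-featured rect vertex described by
// kRectPosColorUVAttribs (assuming a two-float GrPoint and a 4-byte packed GrColor):
//
//   offset 0  : position     (2 x float, 8 bytes)
//   offset 8  : color        (4 x uint8, 4 bytes)
//   offset 12 : local coords (2 x float, 8 bytes)
//
// set_vertex_attributes() below selects one of these layouts and reports the color/local-coord
// offsets back to onDrawRect(), which writes the interleaved vertices directly into reserved
// pool memory.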
static void set_vertex_attributes(GrDrawState* drawState,
                                  bool hasColor, bool hasUVs,
                                  int* colorOffset, int* localOffset) {
    *colorOffset = -1;
    *localOffset = -1;

    // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
    // only in color is a common occurrence in tables.) However, having per-vertex colors disables
    // blending optimizations because we don't know whether the color will be solid or not. These
    // optimizations help determine whether coverage and color can be blended correctly when
    // dual-source blending isn't available. This comes into play when there is coverage. If color
    // were a stage it could take a hint that every vertex's color will be opaque.
    if (hasColor && hasUVs) {
        *colorOffset = sizeof(GrPoint);
        *localOffset = sizeof(GrPoint) + sizeof(GrColor);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(3);
    } else if (hasColor) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(2);
    } else if (hasUVs) {
        *localOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosUVAttribs>(2);
    } else {
        drawState->setVertexAttribs<kRectPosUVAttribs>(1);
    }
}

}  // namespace

void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkMatrix* matrix,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState::AutoColorRestore acr;

    GrDrawState* drawState = this->drawState();

    GrColor color = drawState->getColor();

    int colorOffset, localOffset;
    set_vertex_attributes(drawState,
                          this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
                          NULL != localRect,
                          &colorOffset, &localOffset);
    if (colorOffset >= 0) {
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove
        // this.
        acr.set(drawState, 0xFFFFFFFF);
    }

    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes.
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(drawState->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise, if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
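    // AutoViewMatrixRestore::setIdentity() attempts to fold the current view matrix into the
    // installed effects' coord transforms and set the draw state's view matrix to identity, so
    // the device-space positions we write below are used as-is. If the effects can't absorb the
    // change it returns false and we give up on this draw.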
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vsize = drawState->getVertexSize();

    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // Since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    if (localOffset >= 0) {
        GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vsize);
        if (NULL != localMatrix) {
            localMatrix->mapPointsWithStride(coords, vsize, 4);
        }
    }

    if (colorOffset >= 0) {
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
        }
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr above was valid
    SkASSERT(this->drawState() == drawState);
}

bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fClipProxy. It is common for an edge
            // (or all edges) of the clip to be at the edge of the RT. However, we get that
            // clipping for free via the viewport. We don't want to think that clipping must be
            // enabled in this case. So we extend the clip outward from the edge to avoid these
            // false negatives.
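            // For example, a clip rect of (0, 0, rtWidth, rtHeight) becomes effectively
            // unbounded after the adjustment below, so any draw whose device bounds lie inside
            // the render target passes the proxy test.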
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::Make(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}

int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // We only attempt to concat when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was
    // updated between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check whether there is a compatible draw record that uses the same VB from the pool and
    // the same IB.
    if (kDraw_Cmd != fCmds.back()) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the
    // previous draw record does.
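    // For example, if the previous record starts at pool vertex 100 and covers 4 vertices,
    // an incoming info with startVertex() == 4 adjusts to 104, which lines up with the end of
    // the previous record and can therefore be appended to it.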
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = GrMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexSize();
    poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);
    return instancesToConcat;
}

class AutoClipReenable {
public:
    AutoClipReenable() : fDrawState(NULL) {}
    ~AutoClipReenable() {
        if (NULL != fDrawState) {
            fDrawState->enableState(GrDrawState::kClip_StateBit);
        }
    }
    void set(GrDrawState* drawState) {
        if (drawState->isClipState()) {
            fDrawState = drawState;
            drawState->disableState(GrDrawState::kClip_StateBit);
        }
    }
private:
    GrDrawState* fDrawState;
};

void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    if (drawState.isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }

    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 drawState.getVertexSize();
            poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            GrCrash("unknown geom src type");
    }
    draw->fVertexBuffer->ref();

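    // The index source is handled analogously below: buffer sources are referenced directly,
    // while reserved/array sources are rebased onto the pool's index buffer and start offset.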
    if (info.isIndexed()) {
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                GrCrash("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}

GrInOrderDrawBuffer::StencilPath::StencilPath() {}
GrInOrderDrawBuffer::DrawPath::DrawPath() {}

void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path stenciling?
    if (this->needsNewState()) {
        this->recordState();
    }
    StencilPath* sp = this->recordStencilPath();
    sp->fPath.reset(path);
    path->ref();
    sp->fFill = fill;
}

void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
                                     SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path covering?
    if (this->needsNewState()) {
        this->recordState();
    }
    DrawPath* cp = this->recordDrawPath();
    cp->fPath.reset(path);
    path->ref();
    cp->fFill = fill;
    if (NULL != dstCopy) {
        cp->fDstCopy = *dstCopy;
    }
}

void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
                                bool canIgnoreRect, GrRenderTarget* renderTarget) {
    SkIRect r;
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        SkASSERT(NULL != renderTarget);
    }
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to the current
        // render target. If we get that smart we have to make sure those draws aren't read
        // before this clear (render-to-texture).
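        // For now, just record the full render-target rect; fCanIgnoreRect lets the GPU fall
        // back to a cheaper full-surface clear at playback when the exact rect doesn't matter.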
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = this->recordClear();
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fCanIgnoreRect = canIgnoreRect;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

void GrInOrderDrawBuffer::onInstantGpuTraceEvent(const char* marker) {
    // TODO: add a command to the buffer
}

void GrInOrderDrawBuffer::onPushGpuTraceEvent(const char* marker) {
    // TODO: add a command to the buffer
}

void GrInOrderDrawBuffer::onPopGpuTraceEvent() {
    // TODO: add a command to the buffer
}

void GrInOrderDrawBuffer::reset() {
    SkASSERT(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();
    int numDraws = fDraws.count();
    for (int d = 0; d < numDraws; ++d) {
        // we always have a VB, but not always an IB
        SkASSERT(NULL != fDraws[d].fVertexBuffer);
        fDraws[d].fVertexBuffer->unref();
        SkSafeUnref(fDraws[d].fIndexBuffer);
    }
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fDrawPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fClipOrigins.reset();
    fCopySurfaces.reset();
    fClipSet = true;
}

void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    GrDrawState playbackState;
    GrDrawState* prevDrawState = fDstGpu->drawState();
    prevDrawState->ref();
    fDstGpu->setDrawState(&playbackState);

    GrClipData clipData;

    int currState = 0;
    int currClip = 0;
    int currClear = 0;
    int currDraw = 0;
    int currStencilPath = 0;
    int currDrawPath = 0;
    int currCopySurface = 0;

    for (int c = 0; c < numCmds; ++c) {
        switch (fCmds[c]) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                fDstGpu->executeDraw(draw);

                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                fDstGpu->stencilPath(sp.fPath.get(), sp.fFill);
                ++currStencilPath;
                break;
            }
            case kDrawPath_Cmd: {
                const DrawPath& cp = fDrawPaths[currDrawPath];
                fDstGpu->executeDrawPath(cp.fPath.get(), cp.fFill,
                                         NULL != cp.fDstCopy.texture() ? &cp.fDstCopy : NULL);
                ++currDrawPath;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                fDstGpu->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                fDstGpu->clear(&fClears[currClear].fRect,
                               fClears[currClear].fColor,
                               fClears[currClear].fCanIgnoreRect,
                               fClears[currClear].fRenderTarget);
                ++currClear;
                break;
            case kCopySurface_Cmd:
                fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
                                     fCopySurfaces[currCopySurface].fSrc.get(),
                                     fCopySurfaces[currCopySurface].fSrcRect,
                                     fCopySurfaces[currCopySurface].fDstPoint);
                ++currCopySurface;
                break;
        }
    }
    // we should have consumed all the states, clips, etc.
    SkASSERT(fStates.count() == currState);
    SkASSERT(fClips.count() == currClip);
    SkASSERT(fClipOrigins.count() == currClip);
    SkASSERT(fClears.count() == currClear);
    SkASSERT(fDraws.count() == currDraw);
    SkASSERT(fCopySurfaces.count() == currCopySurface);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}

bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
                                        GrSurface* src,
                                        const SkIRect& srcRect,
                                        const SkIPoint& dstPoint) {
    if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
        CopySurface* cs = this->recordCopySurface();
        cs->fDst.reset(SkRef(dst));
        cs->fSrc.reset(SkRef(src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return true;
    } else {
        return false;
    }
}

bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}

void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}

void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
                                                         int indexCount) {
    // We use geometryHints() to know whether to flush the draw buffer. We can't flush if we are
    // inside an unbalanced pushGeometrySource. Moreover, flushing blows away vertex and index
    // data that was previously reserved. So if the vertex or index data is pulled from reserved
    // space and won't be released by this request then we can't flush.
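    // Each of the booleans below corresponds to one of those conditions; we only flush when
    // none of them holds and geometryHints() recommends it.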
    bool insideGeoPush = fGeoPoolStateStack.count() > 1;

    bool unreleasedVertexSpace =
        !vertexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;

    bool unreleasedIndexSpace =
        !indexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;

    // we don't want to finalize any reserved geom on the target since
    // we don't know that the client has finished writing to it.
    bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();

    int vcount = vertexCount;
    int icount = indexCount;

    if (!insideGeoPush &&
        !unreleasedVertexSpace &&
        !unreleasedIndexSpace &&
        !targetHasReservedGeom &&
        this->geometryHints(&vcount, &icount)) {

        this->flush();
    }
}

bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
                                        int* indexCount) const {
    // We will recommend a flush if the data could fit in a single preallocated buffer but none
    // are left and it can't fit in the current buffer (which may not be preallocated).
    bool flush = false;
    if (NULL != indexCount) {
        int32_t currIndices = fIndexPool.currentBufferIndices();
        if (*indexCount > currIndices &&
            (!fIndexPool.preallocatedBuffersRemaining() &&
             *indexCount <= fIndexPool.preallocatedBufferIndices())) {

            flush = true;
        }
        *indexCount = currIndices;
    }
    if (NULL != vertexCount) {
        size_t vertexSize = this->getDrawState().getVertexSize();
        int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
        if (*vertexCount > currVertices &&
            (!fVertexPool.preallocatedBuffersRemaining() &&
             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {

            flush = true;
        }
        *vertexCount = currVertices;
    }
    return flush;
}

bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
                                               int vertexCount,
                                               void** vertices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(vertexCount > 0);
    SkASSERT(NULL != vertices);
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);

    *vertices = fVertexPool.makeSpace(vertexSize,
                                      vertexCount,
                                      &poolState.fPoolVertexBuffer,
                                      &poolState.fPoolStartVertex);
    return NULL != *vertices;
}

bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(indexCount > 0);
    SkASSERT(NULL != indices);
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);

    *indices = fIndexPool.makeSpace(indexCount,
                                    &poolState.fPoolIndexBuffer,
                                    &poolState.fPoolStartIndex);
    return NULL != *indices;
}

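// The two release functions below return unused tail space to the pools. While recording,
// each draw raises fUsedPoolVertexBytes/fUsedPoolIndexBytes to the furthest byte it actually
// referenced; anything reserved beyond that high-water mark is handed back via putBack().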
void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
    size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}

void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at the tail.
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}

void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}

void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}

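// fGeoPoolStateStack mirrors the base class's geometry source stack. Each push starts a fresh
// record of how much pool vertex/index data the new level has referenced; on pop we have to
// treat the restored level's reserved range as fully used, since later pushes may have
// appended data behind it in the pools.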
void GrInOrderDrawBuffer::geometrySourceWillPush() {
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
}

void GrInOrderDrawBuffer::geometrySourceWillPop(
                                        const GeometrySrcState& restoredState) {
    SkASSERT(fGeoPoolStateStack.count() > 1);
    fGeoPoolStateStack.pop_back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    // we have to assume that any slack we had in our vertex/index data
    // is now unreleasable because data may have been appended later in the
    // pool.
    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
        kArray_GeometrySrcType == restoredState.fVertexSrc) {
        poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
    }
    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
        kArray_GeometrySrcType == restoredState.fIndexSrc) {
        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
                                        restoredState.fIndexCount;
    }
}

bool GrInOrderDrawBuffer::needsNewState() const {
    return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
}

bool GrInOrderDrawBuffer::needsNewClip() const {
    SkASSERT(fClips.count() == fClipOrigins.count());
    if (this->getDrawState().isClipState()) {
        if (fClipSet &&
            (fClips.empty() ||
             fClips.back() != *this->getClip()->fClipStack ||
             fClipOrigins.back() != this->getClip()->fOrigin)) {
            return true;
        }
    }
    return false;
}

void GrInOrderDrawBuffer::recordClip() {
    fClips.push_back() = *this->getClip()->fClipStack;
    fClipOrigins.push_back() = this->getClip()->fOrigin;
    fClipSet = false;
    fCmds.push_back(kSetClip_Cmd);
}

void GrInOrderDrawBuffer::recordState() {
    fStates.push_back().saveFrom(this->getDrawState());
    fCmds.push_back(kSetState_Cmd);
}

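// Each record* helper appends a command token to fCmds and a matching payload entry to its own
// array (fDraws, fStencilPaths, fDrawPaths, fClears, fCopySurfaces); flush() then consumes the
// token stream and the payload arrays in lockstep, so their relative order must always agree.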
GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
    fCmds.push_back(kDraw_Cmd);
    return &fDraws.push_back(info);
}

GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
    fCmds.push_back(kStencilPath_Cmd);
    return &fStencilPaths.push_back();
}

GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() {
    fCmds.push_back(kDrawPath_Cmd);
    return &fDrawPaths.push_back();
}

GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
    fCmds.push_back(kClear_Cmd);
    return &fClears.push_back();
}

GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
    fCmds.push_back(kCopySurface_Cmd);
    return &fCopySurfaces.push_back();
}

void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}