Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | /* |
michael@0 | 2 | * Copyright 2011 Google Inc. |
michael@0 | 3 | * |
michael@0 | 4 | * Use of this source code is governed by a BSD-style license that can be |
michael@0 | 5 | * found in the LICENSE file. |
michael@0 | 6 | */ |
michael@0 | 7 | |
michael@0 | 8 | #include "GrInOrderDrawBuffer.h" |
michael@0 | 9 | |
michael@0 | 10 | #include "GrBufferAllocPool.h" |
michael@0 | 11 | #include "GrDrawTargetCaps.h" |
michael@0 | 12 | #include "GrGpu.h" |
michael@0 | 13 | #include "GrIndexBuffer.h" |
michael@0 | 14 | #include "GrPath.h" |
michael@0 | 15 | #include "GrPoint.h" |
michael@0 | 16 | #include "GrRenderTarget.h" |
michael@0 | 17 | #include "GrTemplates.h" |
michael@0 | 18 | #include "GrTexture.h" |
michael@0 | 19 | #include "GrVertexBuffer.h" |
michael@0 | 20 | |
// Constructs a draw buffer that records draw commands and later replays them
// onto 'gpu' in flush(). 'vertexPool'/'indexPool' back any geometry reserved
// through this target; both must be non-NULL (they are dereferenced in the
// initializer list below, so the asserts in the body fire too late to help —
// they only document the contract).
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    // The buffer keeps its own reference on the destination GPU and shares
    // its capability description.
    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(NULL != vertexPool);
    SkASSERT(NULL != indexPool);

    // Seed the geometry-pool state stack with its top-level entry.
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    // Poison the pool buffer/start fields so use-before-set is obvious in
    // debug builds.
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}
michael@0 | 50 | |
// Drops all recorded commands (releasing buffers they reference) and the
// reference taken on the destination GPU in the constructor.
GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
    // This must be called before the GrDrawTarget destructor
    this->releaseGeometry();
    fDstGpu->unref();
}
michael@0 | 57 | |
michael@0 | 58 | //////////////////////////////////////////////////////////////////////////////// |
michael@0 | 59 | |
michael@0 | 60 | namespace { |
michael@0 | 61 | void get_vertex_bounds(const void* vertices, |
michael@0 | 62 | size_t vertexSize, |
michael@0 | 63 | int vertexCount, |
michael@0 | 64 | SkRect* bounds) { |
michael@0 | 65 | SkASSERT(vertexSize >= sizeof(GrPoint)); |
michael@0 | 66 | SkASSERT(vertexCount > 0); |
michael@0 | 67 | const GrPoint* point = static_cast<const GrPoint*>(vertices); |
michael@0 | 68 | bounds->fLeft = bounds->fRight = point->fX; |
michael@0 | 69 | bounds->fTop = bounds->fBottom = point->fY; |
michael@0 | 70 | for (int i = 1; i < vertexCount; ++i) { |
michael@0 | 71 | point = reinterpret_cast<GrPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize); |
michael@0 | 72 | bounds->growToInclude(point->fX, point->fY); |
michael@0 | 73 | } |
michael@0 | 74 | } |
michael@0 | 75 | } |
michael@0 | 76 | |
michael@0 | 77 | |
michael@0 | 78 | namespace { |
michael@0 | 79 | |
michael@0 | 80 | extern const GrVertexAttrib kRectPosColorUVAttribs[] = { |
michael@0 | 81 | {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding}, |
michael@0 | 82 | {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding}, |
michael@0 | 83 | {kVec2f_GrVertexAttribType, sizeof(GrPoint)+sizeof(GrColor), |
michael@0 | 84 | kLocalCoord_GrVertexAttribBinding}, |
michael@0 | 85 | }; |
michael@0 | 86 | |
michael@0 | 87 | extern const GrVertexAttrib kRectPosUVAttribs[] = { |
michael@0 | 88 | {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding}, |
michael@0 | 89 | {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}, |
michael@0 | 90 | }; |
michael@0 | 91 | |
michael@0 | 92 | static void set_vertex_attributes(GrDrawState* drawState, |
michael@0 | 93 | bool hasColor, bool hasUVs, |
michael@0 | 94 | int* colorOffset, int* localOffset) { |
michael@0 | 95 | *colorOffset = -1; |
michael@0 | 96 | *localOffset = -1; |
michael@0 | 97 | |
michael@0 | 98 | // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing |
michael@0 | 99 | // only in color is a common occurrence in tables). However, having per-vertex colors disables |
michael@0 | 100 | // blending optimizations because we don't know if the color will be solid or not. These |
michael@0 | 101 | // optimizations help determine whether coverage and color can be blended correctly when |
michael@0 | 102 | // dual-source blending isn't available. This comes into play when there is coverage. If colors |
michael@0 | 103 | // were a stage it could take a hint that every vertex's color will be opaque. |
michael@0 | 104 | if (hasColor && hasUVs) { |
michael@0 | 105 | *colorOffset = sizeof(GrPoint); |
michael@0 | 106 | *localOffset = sizeof(GrPoint) + sizeof(GrColor); |
michael@0 | 107 | drawState->setVertexAttribs<kRectPosColorUVAttribs>(3); |
michael@0 | 108 | } else if (hasColor) { |
michael@0 | 109 | *colorOffset = sizeof(GrPoint); |
michael@0 | 110 | drawState->setVertexAttribs<kRectPosColorUVAttribs>(2); |
michael@0 | 111 | } else if (hasUVs) { |
michael@0 | 112 | *localOffset = sizeof(GrPoint); |
michael@0 | 113 | drawState->setVertexAttribs<kRectPosUVAttribs>(2); |
michael@0 | 114 | } else { |
michael@0 | 115 | drawState->setVertexAttribs<kRectPosUVAttribs>(1); |
michael@0 | 116 | } |
michael@0 | 117 | } |
michael@0 | 118 | |
michael@0 | 119 | }; |
michael@0 | 120 | |
// Records a rect draw as a 4-vertex triangle fan referencing the shared quad
// index buffer. Positions are pre-transformed to device space so consecutive
// rect draws can batch in onDraw() even across view-matrix changes.
// 'matrix' (optional) pre-transforms 'rect'; 'localRect'/'localMatrix'
// (optional) supply per-vertex local (UV) coordinates.
void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkMatrix* matrix,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState::AutoColorRestore acr;

    GrDrawState* drawState = this->drawState();

    // Remember the constant color before it gets overwritten below; it is
    // written per-vertex when colorOffset >= 0.
    GrColor color = drawState->getColor();

    int colorOffset, localOffset;
    // Per-vertex color is only used when it won't defeat the blend
    // optimizations (dual-source blending available, or coverage is solid).
    set_vertex_attributes(drawState,
                          this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
                          NULL != localRect,
                          &colorOffset, &localOffset);
    if (colorOffset >= 0) {
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove
        // this.
        acr.set(drawState, 0xFFFFFFFF);
    }

    // Reserve space for 4 vertices (no indices; the quad index buffer is used).
    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(drawState->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vsize = drawState->getVertexSize();

    // Write the fan positions, then map them to device space in place.
    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    if (localOffset >= 0) {
        // Local (UV) coords live at 'localOffset' bytes into each vertex.
        GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vsize);
        if (NULL != localMatrix) {
            localMatrix->mapPointsWithStride(coords, vsize, 4);
        }
    }

    if (colorOffset >= 0) {
        // Replicate the (saved) constant color into all four vertices.
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
        }
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr is valid
    SkASSERT(this->drawState() == drawState);
}
michael@0 | 201 | |
// Returns true when 'devBounds' (device-space bounds of a pending draw) is
// known to lie entirely inside the current clip, so clipping can be skipped
// for that draw. Lazily caches a rectangular clip proxy in fClipProxy; the
// cache state lives in fClipProxyState and is reset when the clip changes.
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;  // "is intersection of rects" — clip reduces to one rect
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fProxyClip. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::Make(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    // No rect proxy: translate the bounds into clip-stack space and ask the
    // clip stack directly.
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}
michael@0 | 243 | |
// Attempts to fold the instanced draw 'info' into the most recently recorded
// draw instead of recording a new one. Returns the number of instances
// actually concatenated (0 when batching is impossible); the caller records
// a new draw for any remainder.
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // we only attempt to concat the case when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (kDraw_Cmd != fCmds.back()) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    // The previous draw must have identical per-instance geometry and use the
    // exact same buffers for its vertices to be contiguous with ours.
    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    // NOTE(review): combined with the check above, this assert implies
    // info.startVertex() == 0 on this path — verify that is the intended
    // invariant for reserved-source instanced draws.
    SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = GrMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexSize();
    poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);
    return instancesToConcat;
}
michael@0 | 296 | |
michael@0 | 297 | class AutoClipReenable { |
michael@0 | 298 | public: |
michael@0 | 299 | AutoClipReenable() : fDrawState(NULL) {} |
michael@0 | 300 | ~AutoClipReenable() { |
michael@0 | 301 | if (NULL != fDrawState) { |
michael@0 | 302 | fDrawState->enableState(GrDrawState::kClip_StateBit); |
michael@0 | 303 | } |
michael@0 | 304 | } |
michael@0 | 305 | void set(GrDrawState* drawState) { |
michael@0 | 306 | if (drawState->isClipState()) { |
michael@0 | 307 | fDrawState = drawState; |
michael@0 | 308 | drawState->disableState(GrDrawState::kClip_StateBit); |
michael@0 | 309 | } |
michael@0 | 310 | } |
michael@0 | 311 | private: |
michael@0 | 312 | GrDrawState* fDrawState; |
michael@0 | 313 | }; |
michael@0 | 314 | |
// Records a draw command: captures clip/state changes as needed, tries to
// batch instanced draws onto the previous record, and pins the vertex/index
// buffers (ref'ed) the draw will read during flush().
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    // If the draw is provably inside the clip, temporarily disable clipping
    // so state comparison/batching isn't defeated by the clip bit; acr
    // restores it when this function returns.
    if (drawState.isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        // Fold as many instances as possible into the previous draw record;
        // only record a new draw for the remainder.
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }

    // Resolve the vertex source to a concrete buffer + start vertex.
    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            // Track how much of the reserved pool space this draw consumes.
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 drawState.getVertexSize();
            poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            GrCrash("unknown geom src type");
    }
    // The record owns a ref on its VB until reset()/flush().
    draw->fVertexBuffer->ref();

    if (info.isIndexed()) {
        // Same resolution for the index source.
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                GrCrash("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}
michael@0 | 386 | |
// Out-of-line default constructors for the recorded path commands; members
// (presumably self-initializing smart-pointer types — see fPath.reset usage
// below) need no explicit setup here.
GrInOrderDrawBuffer::StencilPath::StencilPath() {}
GrInOrderDrawBuffer::DrawPath::DrawPath() {}
michael@0 | 389 | |
michael@0 | 390 | void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) { |
michael@0 | 391 | if (this->needsNewClip()) { |
michael@0 | 392 | this->recordClip(); |
michael@0 | 393 | } |
michael@0 | 394 | // Only compare the subset of GrDrawState relevant to path stenciling? |
michael@0 | 395 | if (this->needsNewState()) { |
michael@0 | 396 | this->recordState(); |
michael@0 | 397 | } |
michael@0 | 398 | StencilPath* sp = this->recordStencilPath(); |
michael@0 | 399 | sp->fPath.reset(path); |
michael@0 | 400 | path->ref(); |
michael@0 | 401 | sp->fFill = fill; |
michael@0 | 402 | } |
michael@0 | 403 | |
michael@0 | 404 | void GrInOrderDrawBuffer::onDrawPath(const GrPath* path, |
michael@0 | 405 | SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) { |
michael@0 | 406 | if (this->needsNewClip()) { |
michael@0 | 407 | this->recordClip(); |
michael@0 | 408 | } |
michael@0 | 409 | // TODO: Only compare the subset of GrDrawState relevant to path covering? |
michael@0 | 410 | if (this->needsNewState()) { |
michael@0 | 411 | this->recordState(); |
michael@0 | 412 | } |
michael@0 | 413 | DrawPath* cp = this->recordDrawPath(); |
michael@0 | 414 | cp->fPath.reset(path); |
michael@0 | 415 | path->ref(); |
michael@0 | 416 | cp->fFill = fill; |
michael@0 | 417 | if (NULL != dstCopy) { |
michael@0 | 418 | cp->fDstCopy = *dstCopy; |
michael@0 | 419 | } |
michael@0 | 420 | } |
michael@0 | 421 | |
michael@0 | 422 | void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color, |
michael@0 | 423 | bool canIgnoreRect, GrRenderTarget* renderTarget) { |
michael@0 | 424 | SkIRect r; |
michael@0 | 425 | if (NULL == renderTarget) { |
michael@0 | 426 | renderTarget = this->drawState()->getRenderTarget(); |
michael@0 | 427 | SkASSERT(NULL != renderTarget); |
michael@0 | 428 | } |
michael@0 | 429 | if (NULL == rect) { |
michael@0 | 430 | // We could do something smart and remove previous draws and clears to |
michael@0 | 431 | // the current render target. If we get that smart we have to make sure |
michael@0 | 432 | // those draws aren't read before this clear (render-to-texture). |
michael@0 | 433 | r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); |
michael@0 | 434 | rect = &r; |
michael@0 | 435 | } |
michael@0 | 436 | Clear* clr = this->recordClear(); |
michael@0 | 437 | clr->fColor = color; |
michael@0 | 438 | clr->fRect = *rect; |
michael@0 | 439 | clr->fCanIgnoreRect = canIgnoreRect; |
michael@0 | 440 | clr->fRenderTarget = renderTarget; |
michael@0 | 441 | renderTarget->ref(); |
michael@0 | 442 | } |
michael@0 | 443 | |
// Currently a no-op: instant trace markers are not recorded for playback.
void GrInOrderDrawBuffer::onInstantGpuTraceEvent(const char* marker) {
    // TODO: add a command to the buffer so the marker replays during flush()
}
michael@0 | 447 | |
// Currently a no-op: trace-event pushes are not recorded for playback.
void GrInOrderDrawBuffer::onPushGpuTraceEvent(const char* marker) {
    // TODO: add a command to the buffer so the push replays during flush()
}
michael@0 | 451 | |
// Currently a no-op: trace-event pops are not recorded for playback.
void GrInOrderDrawBuffer::onPopGpuTraceEvent() {
    // TODO: add a command to the buffer so the pop replays during flush()
}
michael@0 | 455 | |
michael@0 | 456 | void GrInOrderDrawBuffer::reset() { |
michael@0 | 457 | SkASSERT(1 == fGeoPoolStateStack.count()); |
michael@0 | 458 | this->resetVertexSource(); |
michael@0 | 459 | this->resetIndexSource(); |
michael@0 | 460 | int numDraws = fDraws.count(); |
michael@0 | 461 | for (int d = 0; d < numDraws; ++d) { |
michael@0 | 462 | // we always have a VB, but not always an IB |
michael@0 | 463 | SkASSERT(NULL != fDraws[d].fVertexBuffer); |
michael@0 | 464 | fDraws[d].fVertexBuffer->unref(); |
michael@0 | 465 | SkSafeUnref(fDraws[d].fIndexBuffer); |
michael@0 | 466 | } |
michael@0 | 467 | fCmds.reset(); |
michael@0 | 468 | fDraws.reset(); |
michael@0 | 469 | fStencilPaths.reset(); |
michael@0 | 470 | fDrawPaths.reset(); |
michael@0 | 471 | fStates.reset(); |
michael@0 | 472 | fClears.reset(); |
michael@0 | 473 | fVertexPool.reset(); |
michael@0 | 474 | fIndexPool.reset(); |
michael@0 | 475 | fClips.reset(); |
michael@0 | 476 | fClipOrigins.reset(); |
michael@0 | 477 | fCopySurfaces.reset(); |
michael@0 | 478 | fClipSet = true; |
michael@0 | 479 | } |
michael@0 | 480 | |
// Replays every recorded command, in order, on the destination GPU, then
// resets the buffer to empty. Re-entrant calls (guarded by fFlushing) and
// empty buffers return immediately.
void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    // Reserved geometry must have been released before flushing.
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    // flushRestore puts fFlushing back to its prior value on scope exit.
    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    // Make the pooled vertex/index data visible to the GPU.
    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    // Play back into a scratch draw state; the GPU's previous state is
    // saved (ref'ed) and restored at the end.
    GrDrawState playbackState;
    GrDrawState* prevDrawState = fDstGpu->drawState();
    prevDrawState->ref();
    fDstGpu->setDrawState(&playbackState);

    GrClipData clipData;

    // Per-command-type cursors into the parallel record arrays; fCmds holds
    // the interleaving order.
    int currState = 0;
    int currClip = 0;
    int currClear = 0;
    int currDraw = 0;
    int currStencilPath = 0;
    int currDrawPath = 0;
    int currCopySurface = 0;

    for (int c = 0; c < numCmds; ++c) {
        switch (fCmds[c]) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                fDstGpu->executeDraw(draw);

                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                fDstGpu->stencilPath(sp.fPath.get(), sp.fFill);
                ++currStencilPath;
                break;
            }
            case kDrawPath_Cmd: {
                const DrawPath& cp = fDrawPaths[currDrawPath];
                fDstGpu->executeDrawPath(cp.fPath.get(), cp.fFill,
                                         NULL != cp.fDstCopy.texture() ? &cp.fDstCopy : NULL);
                ++currDrawPath;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                // Clip stacks and their origins are stored in parallel arrays
                // sharing one cursor.
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                fDstGpu->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                fDstGpu->clear(&fClears[currClear].fRect,
                               fClears[currClear].fColor,
                               fClears[currClear].fCanIgnoreRect,
                               fClears[currClear].fRenderTarget);
                ++currClear;
                break;
            case kCopySurface_Cmd:
                fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
                                     fCopySurfaces[currCopySurface].fSrc.get(),
                                     fCopySurfaces[currCopySurface].fSrcRect,
                                     fCopySurfaces[currCopySurface].fDstPoint);
                ++currCopySurface;
                break;
        }
    }
    // we should have consumed all the states, clips, etc.
    SkASSERT(fStates.count() == currState);
    SkASSERT(fClips.count() == currClip);
    SkASSERT(fClipOrigins.count() == currClip);
    SkASSERT(fClears.count() == currClear);
    SkASSERT(fDraws.count() == currDraw);
    SkASSERT(fCopySurfaces.count() == currCopySurface);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}
michael@0 | 583 | |
michael@0 | 584 | bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst, |
michael@0 | 585 | GrSurface* src, |
michael@0 | 586 | const SkIRect& srcRect, |
michael@0 | 587 | const SkIPoint& dstPoint) { |
michael@0 | 588 | if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) { |
michael@0 | 589 | CopySurface* cs = this->recordCopySurface(); |
michael@0 | 590 | cs->fDst.reset(SkRef(dst)); |
michael@0 | 591 | cs->fSrc.reset(SkRef(src)); |
michael@0 | 592 | cs->fSrcRect = srcRect; |
michael@0 | 593 | cs->fDstPoint = dstPoint; |
michael@0 | 594 | return true; |
michael@0 | 595 | } else { |
michael@0 | 596 | return false; |
michael@0 | 597 | } |
michael@0 | 598 | } |
michael@0 | 599 | |
// The buffer adds no restrictions of its own; defer entirely to the
// destination GPU's copy capabilities.
bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}
michael@0 | 606 | |
// Delegate to the destination GPU to describe a suitable copy destination
// for 'src'.
void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}
michael@0 | 610 | |
michael@0 | 611 | void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, |
michael@0 | 612 | int indexCount) { |
michael@0 | 613 | // We use geometryHints() to know whether to flush the draw buffer. We |
michael@0 | 614 | // can't flush if we are inside an unbalanced pushGeometrySource. |
michael@0 | 615 | // Moreover, flushing blows away vertex and index data that was |
michael@0 | 616 | // previously reserved. So if the vertex or index data is pulled from |
michael@0 | 617 | // reserved space and won't be released by this request then we can't |
michael@0 | 618 | // flush. |
michael@0 | 619 | bool insideGeoPush = fGeoPoolStateStack.count() > 1; |
michael@0 | 620 | |
michael@0 | 621 | bool unreleasedVertexSpace = |
michael@0 | 622 | !vertexCount && |
michael@0 | 623 | kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc; |
michael@0 | 624 | |
michael@0 | 625 | bool unreleasedIndexSpace = |
michael@0 | 626 | !indexCount && |
michael@0 | 627 | kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc; |
michael@0 | 628 | |
michael@0 | 629 | // we don't want to finalize any reserved geom on the target since |
michael@0 | 630 | // we don't know that the client has finished writing to it. |
michael@0 | 631 | bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices(); |
michael@0 | 632 | |
michael@0 | 633 | int vcount = vertexCount; |
michael@0 | 634 | int icount = indexCount; |
michael@0 | 635 | |
michael@0 | 636 | if (!insideGeoPush && |
michael@0 | 637 | !unreleasedVertexSpace && |
michael@0 | 638 | !unreleasedIndexSpace && |
michael@0 | 639 | !targetHasReservedGeom && |
michael@0 | 640 | this->geometryHints(&vcount, &icount)) { |
michael@0 | 641 | |
michael@0 | 642 | this->flush(); |
michael@0 | 643 | } |
michael@0 | 644 | } |
michael@0 | 645 | |
michael@0 | 646 | bool GrInOrderDrawBuffer::geometryHints(int* vertexCount, |
michael@0 | 647 | int* indexCount) const { |
michael@0 | 648 | // we will recommend a flush if the data could fit in a single |
michael@0 | 649 | // preallocated buffer but none are left and it can't fit |
michael@0 | 650 | // in the current buffer (which may not be prealloced). |
michael@0 | 651 | bool flush = false; |
michael@0 | 652 | if (NULL != indexCount) { |
michael@0 | 653 | int32_t currIndices = fIndexPool.currentBufferIndices(); |
michael@0 | 654 | if (*indexCount > currIndices && |
michael@0 | 655 | (!fIndexPool.preallocatedBuffersRemaining() && |
michael@0 | 656 | *indexCount <= fIndexPool.preallocatedBufferIndices())) { |
michael@0 | 657 | |
michael@0 | 658 | flush = true; |
michael@0 | 659 | } |
michael@0 | 660 | *indexCount = currIndices; |
michael@0 | 661 | } |
michael@0 | 662 | if (NULL != vertexCount) { |
michael@0 | 663 | size_t vertexSize = this->getDrawState().getVertexSize(); |
michael@0 | 664 | int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize); |
michael@0 | 665 | if (*vertexCount > currVertices && |
michael@0 | 666 | (!fVertexPool.preallocatedBuffersRemaining() && |
michael@0 | 667 | *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) { |
michael@0 | 668 | |
michael@0 | 669 | flush = true; |
michael@0 | 670 | } |
michael@0 | 671 | *vertexCount = currVertices; |
michael@0 | 672 | } |
michael@0 | 673 | return flush; |
michael@0 | 674 | } |
michael@0 | 675 | |
michael@0 | 676 | bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize, |
michael@0 | 677 | int vertexCount, |
michael@0 | 678 | void** vertices) { |
michael@0 | 679 | GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
michael@0 | 680 | SkASSERT(vertexCount > 0); |
michael@0 | 681 | SkASSERT(NULL != vertices); |
michael@0 | 682 | SkASSERT(0 == poolState.fUsedPoolVertexBytes); |
michael@0 | 683 | |
michael@0 | 684 | *vertices = fVertexPool.makeSpace(vertexSize, |
michael@0 | 685 | vertexCount, |
michael@0 | 686 | &poolState.fPoolVertexBuffer, |
michael@0 | 687 | &poolState.fPoolStartVertex); |
michael@0 | 688 | return NULL != *vertices; |
michael@0 | 689 | } |
michael@0 | 690 | |
michael@0 | 691 | bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) { |
michael@0 | 692 | GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
michael@0 | 693 | SkASSERT(indexCount > 0); |
michael@0 | 694 | SkASSERT(NULL != indices); |
michael@0 | 695 | SkASSERT(0 == poolState.fUsedPoolIndexBytes); |
michael@0 | 696 | |
michael@0 | 697 | *indices = fIndexPool.makeSpace(indexCount, |
michael@0 | 698 | &poolState.fPoolIndexBuffer, |
michael@0 | 699 | &poolState.fPoolStartIndex); |
michael@0 | 700 | return NULL != *indices; |
michael@0 | 701 | } |
michael@0 | 702 | |
michael@0 | 703 | void GrInOrderDrawBuffer::releaseReservedVertexSpace() { |
michael@0 | 704 | GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
michael@0 | 705 | const GeometrySrcState& geoSrc = this->getGeomSrc(); |
michael@0 | 706 | |
michael@0 | 707 | // If we get a release vertex space call then our current source should either be reserved |
michael@0 | 708 | // or array (which we copied into reserved space). |
michael@0 | 709 | SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc || |
michael@0 | 710 | kArray_GeometrySrcType == geoSrc.fVertexSrc); |
michael@0 | 711 | |
michael@0 | 712 | // When the caller reserved vertex buffer space we gave it back a pointer |
michael@0 | 713 | // provided by the vertex buffer pool. At each draw we tracked the largest |
michael@0 | 714 | // offset into the pool's pointer that was referenced. Now we return to the |
michael@0 | 715 | // pool any portion at the tail of the allocation that no draw referenced. |
michael@0 | 716 | size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount; |
michael@0 | 717 | fVertexPool.putBack(reservedVertexBytes - |
michael@0 | 718 | poolState.fUsedPoolVertexBytes); |
michael@0 | 719 | poolState.fUsedPoolVertexBytes = 0; |
michael@0 | 720 | poolState.fPoolVertexBuffer = NULL; |
michael@0 | 721 | poolState.fPoolStartVertex = 0; |
michael@0 | 722 | } |
michael@0 | 723 | |
michael@0 | 724 | void GrInOrderDrawBuffer::releaseReservedIndexSpace() { |
michael@0 | 725 | GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
michael@0 | 726 | const GeometrySrcState& geoSrc = this->getGeomSrc(); |
michael@0 | 727 | |
michael@0 | 728 | // If we get a release index space call then our current source should either be reserved |
michael@0 | 729 | // or array (which we copied into reserved space). |
michael@0 | 730 | SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc || |
michael@0 | 731 | kArray_GeometrySrcType == geoSrc.fIndexSrc); |
michael@0 | 732 | |
michael@0 | 733 | // Similar to releaseReservedVertexSpace we return any unused portion at |
michael@0 | 734 | // the tail |
michael@0 | 735 | size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount; |
michael@0 | 736 | fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes); |
michael@0 | 737 | poolState.fUsedPoolIndexBytes = 0; |
michael@0 | 738 | poolState.fPoolIndexBuffer = NULL; |
michael@0 | 739 | poolState.fPoolStartIndex = 0; |
michael@0 | 740 | } |
michael@0 | 741 | |
// Copies a client-supplied vertex array into the vertex pool so deferred
// draws can reference pool-backed geometry; records the destination buffer
// and starting vertex in the current pool state.
void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
    // Debug builds capture the append result for the assert below; in
    // release builds the call is a bare expression statement and
    // GR_DEBUGASSERT compiles away.
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}
michael@0 | 757 | |
// Copies a client-supplied index array into the index pool so deferred draws
// can reference pool-backed geometry; records the destination buffer and
// starting index in the current pool state.
void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
#ifdef SK_DEBUG
    // Debug builds capture the append result for the assert below; in
    // release builds the call is a bare expression statement and
    // GR_DEBUGASSERT compiles away.
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}
michael@0 | 771 | |
// Array vertex sources were implemented by copying the client's array into
// reserved pool space (see onSetVertexSourceToArray), so releasing the array
// is the same as releasing reserved space. The explicitly qualified call
// performs non-virtual dispatch to this class's implementation.
void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}
michael@0 | 777 | |
// Array index sources were implemented by copying the client's array into
// reserved pool space (see onSetIndexSourceToArray), so releasing the array
// is the same as releasing reserved space. The explicitly qualified call
// performs non-virtual dispatch to this class's implementation.
void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}
michael@0 | 783 | |
michael@0 | 784 | void GrInOrderDrawBuffer::geometrySourceWillPush() { |
michael@0 | 785 | GeometryPoolState& poolState = fGeoPoolStateStack.push_back(); |
michael@0 | 786 | poolState.fUsedPoolVertexBytes = 0; |
michael@0 | 787 | poolState.fUsedPoolIndexBytes = 0; |
michael@0 | 788 | #ifdef SK_DEBUG |
michael@0 | 789 | poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0; |
michael@0 | 790 | poolState.fPoolStartVertex = ~0; |
michael@0 | 791 | poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0; |
michael@0 | 792 | poolState.fPoolStartIndex = ~0; |
michael@0 | 793 | #endif |
michael@0 | 794 | } |
michael@0 | 795 | |
michael@0 | 796 | void GrInOrderDrawBuffer::geometrySourceWillPop( |
michael@0 | 797 | const GeometrySrcState& restoredState) { |
michael@0 | 798 | SkASSERT(fGeoPoolStateStack.count() > 1); |
michael@0 | 799 | fGeoPoolStateStack.pop_back(); |
michael@0 | 800 | GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
michael@0 | 801 | // we have to assume that any slack we had in our vertex/index data |
michael@0 | 802 | // is now unreleasable because data may have been appended later in the |
michael@0 | 803 | // pool. |
michael@0 | 804 | if (kReserved_GeometrySrcType == restoredState.fVertexSrc || |
michael@0 | 805 | kArray_GeometrySrcType == restoredState.fVertexSrc) { |
michael@0 | 806 | poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; |
michael@0 | 807 | } |
michael@0 | 808 | if (kReserved_GeometrySrcType == restoredState.fIndexSrc || |
michael@0 | 809 | kArray_GeometrySrcType == restoredState.fIndexSrc) { |
michael@0 | 810 | poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * |
michael@0 | 811 | restoredState.fIndexCount; |
michael@0 | 812 | } |
michael@0 | 813 | } |
michael@0 | 814 | |
michael@0 | 815 | bool GrInOrderDrawBuffer::needsNewState() const { |
michael@0 | 816 | return fStates.empty() || !fStates.back().isEqual(this->getDrawState()); |
michael@0 | 817 | } |
michael@0 | 818 | |
michael@0 | 819 | bool GrInOrderDrawBuffer::needsNewClip() const { |
michael@0 | 820 | SkASSERT(fClips.count() == fClipOrigins.count()); |
michael@0 | 821 | if (this->getDrawState().isClipState()) { |
michael@0 | 822 | if (fClipSet && |
michael@0 | 823 | (fClips.empty() || |
michael@0 | 824 | fClips.back() != *this->getClip()->fClipStack || |
michael@0 | 825 | fClipOrigins.back() != this->getClip()->fOrigin)) { |
michael@0 | 826 | return true; |
michael@0 | 827 | } |
michael@0 | 828 | } |
michael@0 | 829 | return false; |
michael@0 | 830 | } |
michael@0 | 831 | |
michael@0 | 832 | void GrInOrderDrawBuffer::recordClip() { |
michael@0 | 833 | fClips.push_back() = *this->getClip()->fClipStack; |
michael@0 | 834 | fClipOrigins.push_back() = this->getClip()->fOrigin; |
michael@0 | 835 | fClipSet = false; |
michael@0 | 836 | fCmds.push_back(kSetClip_Cmd); |
michael@0 | 837 | } |
michael@0 | 838 | |
michael@0 | 839 | void GrInOrderDrawBuffer::recordState() { |
michael@0 | 840 | fStates.push_back().saveFrom(this->getDrawState()); |
michael@0 | 841 | fCmds.push_back(kSetState_Cmd); |
michael@0 | 842 | } |
michael@0 | 843 | |
michael@0 | 844 | GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) { |
michael@0 | 845 | fCmds.push_back(kDraw_Cmd); |
michael@0 | 846 | return &fDraws.push_back(info); |
michael@0 | 847 | } |
michael@0 | 848 | |
michael@0 | 849 | GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() { |
michael@0 | 850 | fCmds.push_back(kStencilPath_Cmd); |
michael@0 | 851 | return &fStencilPaths.push_back(); |
michael@0 | 852 | } |
michael@0 | 853 | |
michael@0 | 854 | GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() { |
michael@0 | 855 | fCmds.push_back(kDrawPath_Cmd); |
michael@0 | 856 | return &fDrawPaths.push_back(); |
michael@0 | 857 | } |
michael@0 | 858 | |
michael@0 | 859 | GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() { |
michael@0 | 860 | fCmds.push_back(kClear_Cmd); |
michael@0 | 861 | return &fClears.push_back(); |
michael@0 | 862 | } |
michael@0 | 863 | |
michael@0 | 864 | GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() { |
michael@0 | 865 | fCmds.push_back(kCopySurface_Cmd); |
michael@0 | 866 | return &fCopySurfaces.push_back(); |
michael@0 | 867 | } |
michael@0 | 868 | |
michael@0 | 869 | |
michael@0 | 870 | void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) { |
michael@0 | 871 | INHERITED::clipWillBeSet(newClipData); |
michael@0 | 872 | fClipSet = true; |
michael@0 | 873 | fClipProxyState = kUnknown_ClipProxyState; |
michael@0 | 874 | } |