/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrPath.h"
#include "GrPoint.h"
#include "GrRenderTarget.h"
#include "GrTemplates.h"
#include "GrTexture.h"
#include "GrVertexBuffer.h"

GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(NULL != vertexPool);
    SkASSERT(NULL != indexPool);

    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
    // This must be called before the GrDrawTarget destructor
    this->releaseGeometry();
    fDstGpu->unref();
}

////////////////////////////////////////////////////////////////////////////////

namespace {
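// Computes the device-space bounds of 'vertexCount' vertices whose position is
// the first GrPoint in each vertex, stepping through the data 'vertexSize'
// bytes at a time.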
void get_vertex_bounds(const void* vertices,
                       size_t vertexSize,
                       int vertexCount,
                       SkRect* bounds) {
    SkASSERT(vertexSize >= sizeof(GrPoint));
    SkASSERT(vertexCount > 0);
    const GrPoint* point = static_cast<const GrPoint*>(vertices);
    bounds->fLeft = bounds->fRight = point->fX;
    bounds->fTop = bounds->fBottom = point->fY;
    for (int i = 1; i < vertexCount; ++i) {
        point = reinterpret_cast<GrPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
        bounds->growToInclude(point->fX, point->fY);
    }
}
}


namespace {

extern const GrVertexAttrib kRectPosColorUVAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint)+sizeof(GrColor),
     kLocalCoord_GrVertexAttribBinding},
};

extern const GrVertexAttrib kRectPosUVAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding},
};
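
// Chooses a vertex layout for onDrawRect() based on whether per-vertex colors
// and/or local coords are needed, and returns the byte offsets of those
// attributes within a vertex (-1 if absent).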
static void set_vertex_attributes(GrDrawState* drawState,
                                  bool hasColor, bool hasUVs,
                                  int* colorOffset, int* localOffset) {
    *colorOffset = -1;
    *localOffset = -1;

    // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
    // only in color is a common occurrence in tables.) However, having per-vertex colors disables
    // blending optimizations because we don't know if the color will be solid or not. These
    // optimizations help determine whether coverage and color can be blended correctly when
    // dual-source blending isn't available. This comes into play when there is coverage. If colors
    // were a stage it could take a hint that every vertex's color will be opaque.
    if (hasColor && hasUVs) {
        *colorOffset = sizeof(GrPoint);
        *localOffset = sizeof(GrPoint) + sizeof(GrColor);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(3);
    } else if (hasColor) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(2);
    } else if (hasUVs) {
        *localOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosUVAttribs>(2);
    } else {
        drawState->setVertexAttribs<kRectPosUVAttribs>(1);
    }
}

}
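
// Draws an axis-aligned rect by generating the four device-space vertices on
// the CPU rather than deferring the view-matrix transform to the GPU; this lets
// consecutive rects with different matrices batch into one instanced draw.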
void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkMatrix* matrix,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState::AutoColorRestore acr;

    GrDrawState* drawState = this->drawState();

    GrColor color = drawState->getColor();

    int colorOffset, localOffset;
    set_vertex_attributes(drawState,
                          this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
                          NULL != localRect,
                          &colorOffset, &localOffset);
    if (colorOffset >= 0) {
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove
        // this.
        acr.set(drawState, 0xFFFFFFFF);
    }

    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(drawState->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise, if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vsize = drawState->getVertexSize();

    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    if (localOffset >= 0) {
        GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vsize);
        if (NULL != localMatrix) {
            localMatrix->mapPointsWithStride(coords, vsize, 4);
        }
    }

    if (colorOffset >= 0) {
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
        }
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr is valid
    SkASSERT(this->drawState() == drawState);
}
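
// Fast conservative containment test: if the clip reduces to a rect we cache it
// in fClipProxy (extended past RT edges, which the viewport clips for free) and
// test the draw's device bounds against it before falling back to the full clip
// stack query.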
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fClipProxy. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::Make(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}
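
// Attempts to append info's instances onto the previously recorded draw.
// Returns the number of instances actually concatenated (0 if the draws aren't
// compatible).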
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // We only attempt to concat when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check if the previous draw is compatible: it must use the same VB from the pool and
    // the same IB.
    if (kDraw_Cmd != fCmds.back()) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = GrMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexSize();
    poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);
    return instancesToConcat;
}
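
// RAII helper: disables the clip bit on a draw state and re-enables it when the
// helper goes out of scope.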
class AutoClipReenable {
public:
    AutoClipReenable() : fDrawState(NULL) {}
    ~AutoClipReenable() {
        if (NULL != fDrawState) {
            fDrawState->enableState(GrDrawState::kClip_StateBit);
        }
    }
    void set(GrDrawState* drawState) {
        if (drawState->isClipState()) {
            fDrawState = drawState;
            drawState->disableState(GrDrawState::kClip_StateBit);
        }
    }
private:
    GrDrawState* fDrawState;
};
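
// Records a draw command. If the draw is instanced and compatible with the
// previously recorded draw, its instances are folded into that record instead
// of emitting a new command.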
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    if (drawState.isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }

    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 drawState.getVertexSize();
            poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            GrCrash("unknown geom src type");
    }
    draw->fVertexBuffer->ref();

    if (info.isIndexed()) {
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                GrCrash("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}

GrInOrderDrawBuffer::StencilPath::StencilPath() {}
GrInOrderDrawBuffer::DrawPath::DrawPath() {}

void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path stenciling?
    if (this->needsNewState()) {
        this->recordState();
    }
    StencilPath* sp = this->recordStencilPath();
    sp->fPath.reset(path);
    path->ref();
    sp->fFill = fill;
}

void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
                                     SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path covering?
    if (this->needsNewState()) {
        this->recordState();
    }
    DrawPath* cp = this->recordDrawPath();
    cp->fPath.reset(path);
    path->ref();
    cp->fFill = fill;
    if (NULL != dstCopy) {
        cp->fDstCopy = *dstCopy;
    }
}
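
// Records a clear; a NULL rect is expanded to the full render target, and a
// NULL render target means "use the draw state's current target".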
void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
                                bool canIgnoreRect, GrRenderTarget* renderTarget) {
    SkIRect r;
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        SkASSERT(NULL != renderTarget);
    }
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = this->recordClear();
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fCanIgnoreRect = canIgnoreRect;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

void GrInOrderDrawBuffer::onInstantGpuTraceEvent(const char* marker) {
    // TODO: add a command to the buffer
}

void GrInOrderDrawBuffer::onPushGpuTraceEvent(const char* marker) {
    // TODO: add a command to the buffer
}

void GrInOrderDrawBuffer::onPopGpuTraceEvent() {
    // TODO: add a command to the buffer
}
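
// Drops all recorded commands and releases the buffer refs they hold; also
// resets both allocation pools. Called after playback and from the destructor.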
void GrInOrderDrawBuffer::reset() {
    SkASSERT(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();
    int numDraws = fDraws.count();
    for (int d = 0; d < numDraws; ++d) {
        // we always have a VB, but not always an IB
        SkASSERT(NULL != fDraws[d].fVertexBuffer);
        fDraws[d].fVertexBuffer->unref();
        SkSafeUnref(fDraws[d].fIndexBuffer);
    }
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fDrawPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fClipOrigins.reset();
    fCopySurfaces.reset();
    fClipSet = true;
}
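
// Plays the recorded command stream back to fDstGpu in order, then resets this
// buffer. Reentrant calls are ignored via fFlushing.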
void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    GrDrawState playbackState;
    GrDrawState* prevDrawState = fDstGpu->drawState();
    prevDrawState->ref();
    fDstGpu->setDrawState(&playbackState);

    GrClipData clipData;

    int currState = 0;
    int currClip = 0;
    int currClear = 0;
    int currDraw = 0;
    int currStencilPath = 0;
    int currDrawPath = 0;
    int currCopySurface = 0;

    for (int c = 0; c < numCmds; ++c) {
        switch (fCmds[c]) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                fDstGpu->executeDraw(draw);

                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                fDstGpu->stencilPath(sp.fPath.get(), sp.fFill);
                ++currStencilPath;
                break;
            }
            case kDrawPath_Cmd: {
                const DrawPath& cp = fDrawPaths[currDrawPath];
                fDstGpu->executeDrawPath(cp.fPath.get(), cp.fFill,
                                         NULL != cp.fDstCopy.texture() ? &cp.fDstCopy : NULL);
                ++currDrawPath;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                fDstGpu->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                fDstGpu->clear(&fClears[currClear].fRect,
                               fClears[currClear].fColor,
                               fClears[currClear].fCanIgnoreRect,
                               fClears[currClear].fRenderTarget);
                ++currClear;
                break;
            case kCopySurface_Cmd:
                fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
                                     fCopySurfaces[currCopySurface].fSrc.get(),
                                     fCopySurfaces[currCopySurface].fSrcRect,
                                     fCopySurfaces[currCopySurface].fDstPoint);
                ++currCopySurface;
                break;
        }
    }
    // we should have consumed all the states, clips, etc.
    SkASSERT(fStates.count() == currState);
    SkASSERT(fClips.count() == currClip);
    SkASSERT(fClipOrigins.count() == currClip);
    SkASSERT(fClears.count() == currClear);
    SkASSERT(fDraws.count() == currDraw);
    SkASSERT(fCopySurfaces.count() == currCopySurface);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}

bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
                                        GrSurface* src,
                                        const SkIRect& srcRect,
                                        const SkIPoint& dstPoint) {
    if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
        CopySurface* cs = this->recordCopySurface();
        cs->fDst.reset(SkRef(dst));
        cs->fSrc.reset(SkRef(src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return true;
    } else {
        return false;
    }
}

bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}

void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}

void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
                                                         int indexCount) {
    // We use geometryHints() to know whether to flush the draw buffer. We
    // can't flush if we are inside an unbalanced pushGeometrySource.
    // Moreover, flushing blows away vertex and index data that was
    // previously reserved. So if the vertex or index data is pulled from
    // reserved space and won't be released by this request then we can't
    // flush.
    bool insideGeoPush = fGeoPoolStateStack.count() > 1;

    bool unreleasedVertexSpace =
        !vertexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;

    bool unreleasedIndexSpace =
        !indexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;

    // we don't want to finalize any reserved geom on the target since
    // we don't know that the client has finished writing to it.
    bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();

    int vcount = vertexCount;
    int icount = indexCount;

    if (!insideGeoPush &&
        !unreleasedVertexSpace &&
        !unreleasedIndexSpace &&
        !targetHasReservedGeom &&
        this->geometryHints(&vcount, &icount)) {

        this->flush();
    }
}

bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
                                        int* indexCount) const {
    // we will recommend a flush if the data could fit in a single
    // preallocated buffer but none are left and it can't fit
    // in the current buffer (which may not be preallocated).
    bool flush = false;
    if (NULL != indexCount) {
        int32_t currIndices = fIndexPool.currentBufferIndices();
        if (*indexCount > currIndices &&
            (!fIndexPool.preallocatedBuffersRemaining() &&
             *indexCount <= fIndexPool.preallocatedBufferIndices())) {

            flush = true;
        }
        *indexCount = currIndices;
    }
    if (NULL != vertexCount) {
        size_t vertexSize = this->getDrawState().getVertexSize();
        int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
        if (*vertexCount > currVertices &&
            (!fVertexPool.preallocatedBuffersRemaining() &&
             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {

            flush = true;
        }
        *vertexCount = currVertices;
    }
    return flush;
}

bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
                                               int vertexCount,
                                               void** vertices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(vertexCount > 0);
    SkASSERT(NULL != vertices);
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);

    *vertices = fVertexPool.makeSpace(vertexSize,
                                      vertexCount,
                                      &poolState.fPoolVertexBuffer,
                                      &poolState.fPoolStartVertex);
    return NULL != *vertices;
}

bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(indexCount > 0);
    SkASSERT(NULL != indices);
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);

    *indices = fIndexPool.makeSpace(indexCount,
                                    &poolState.fPoolIndexBuffer,
                                    &poolState.fPoolStartIndex);
    return NULL != *indices;
}

void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
    size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}

void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at
    // the tail
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}
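
// Client-provided vertex/index arrays are handled by copying them into the
// pools; the pool reports back which buffer and starting element the copy
// landed in.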
void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}

void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handled it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}
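
// Pushes a fresh pool-state entry so a nested geometry source gets its own
// usage tracking; popped again in geometrySourceWillPop().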
void GrInOrderDrawBuffer::geometrySourceWillPush() {
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
}

void GrInOrderDrawBuffer::geometrySourceWillPop(
                                        const GeometrySrcState& restoredState) {
    SkASSERT(fGeoPoolStateStack.count() > 1);
    fGeoPoolStateStack.pop_back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    // we have to assume that any slack we had in our vertex/index data
    // is now unreleasable because data may have been appended later in the
    // pool.
    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
        kArray_GeometrySrcType == restoredState.fVertexSrc) {
        poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
    }
    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
        kArray_GeometrySrcType == restoredState.fIndexSrc) {
        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
                                        restoredState.fIndexCount;
    }
}
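
// A new state (or clip) record is only needed when the current draw state (or
// clip) differs from the last one recorded; otherwise the previous record is
// reused during playback.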
bool GrInOrderDrawBuffer::needsNewState() const {
    return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
}

bool GrInOrderDrawBuffer::needsNewClip() const {
    SkASSERT(fClips.count() == fClipOrigins.count());
    if (this->getDrawState().isClipState()) {
        if (fClipSet &&
            (fClips.empty() ||
             fClips.back() != *this->getClip()->fClipStack ||
             fClipOrigins.back() != this->getClip()->fOrigin)) {
            return true;
        }
    }
    return false;
}

void GrInOrderDrawBuffer::recordClip() {
    fClips.push_back() = *this->getClip()->fClipStack;
    fClipOrigins.push_back() = this->getClip()->fOrigin;
    fClipSet = false;
    fCmds.push_back(kSetClip_Cmd);
}

void GrInOrderDrawBuffer::recordState() {
    fStates.push_back().saveFrom(this->getDrawState());
    fCmds.push_back(kSetState_Cmd);
}

GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
    fCmds.push_back(kDraw_Cmd);
    return &fDraws.push_back(info);
}

GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
    fCmds.push_back(kStencilPath_Cmd);
    return &fStencilPaths.push_back();
}

GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() {
    fCmds.push_back(kDrawPath_Cmd);
    return &fDrawPaths.push_back();
}

GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
    fCmds.push_back(kClear_Cmd);
    return &fClears.push_back();
}

GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
    fCmds.push_back(kCopySurface_Cmd);
    return &fCopySurfaces.push_back();
}

void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}