|
1 |
|
2 /* |
|
3 * Copyright 2010 Google Inc. |
|
4 * |
|
5 * Use of this source code is governed by a BSD-style license that can be |
|
6 * found in the LICENSE file. |
|
7 */ |
|
8 |
|
9 |
|
10 #include "GrGpu.h" |
|
11 |
|
12 #include "GrBufferAllocPool.h" |
|
13 #include "GrContext.h" |
|
14 #include "GrDrawTargetCaps.h" |
|
15 #include "GrIndexBuffer.h" |
|
16 #include "GrStencilBuffer.h" |
|
17 #include "GrVertexBuffer.h" |
|
18 |
|
19 // probably makes no sense for this to be less than a page |
|
20 static const size_t VERTEX_POOL_VB_SIZE = 1 << 18; |
|
21 static const int VERTEX_POOL_VB_COUNT = 4; |
|
22 static const size_t INDEX_POOL_IB_SIZE = 1 << 16; |
|
23 static const int INDEX_POOL_IB_COUNT = 4; |
|
24 |
|
25 //////////////////////////////////////////////////////////////////////////////// |
|
26 |
|
27 #define DEBUG_INVAL_BUFFER 0xdeadcafe |
|
28 #define DEBUG_INVAL_START_IDX -1 |
|
29 |
|
// Constructor: pools and the shared quad index buffer are created lazily, so
// everything pointer-like starts NULL. fResetBits starts as kAll so the first
// flush treats the whole backend state as dirty.
GrGpu::GrGpu(GrContext* context)
    : GrDrawTarget(context)
    , fResetTimestamp(kExpiredTimestamp+1)
    , fResetBits(kAll_GrBackendState)
    , fVertexPool(NULL)
    , fIndexPool(NULL)
    , fVertexPoolUseCnt(0)
    , fIndexPoolUseCnt(0)
    , fQuadIndexBuffer(NULL) {

    fClipMaskManager.setGpu(this);

    // Bottom entry of the geometry-pool state stack; pushes/pops must stay
    // balanced above this entry (see geometrySourceWillPop's assert).
    fGeomPoolStateStack.push_back();
#ifdef SK_DEBUG
    // Poison the initial state so use-before-set is caught by asserts.
    GeometryPoolState& poolState = fGeomPoolStateStack.back();
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#endif
}
|
51 |
|
// Destructor releases (not abandons) all tracked resources: the backend
// objects are assumed to still be valid at this point.
GrGpu::~GrGpu() {
    this->releaseResources();
}
|
55 |
|
56 void GrGpu::abandonResources() { |
|
57 |
|
58 fClipMaskManager.releaseResources(); |
|
59 |
|
60 while (NULL != fResourceList.head()) { |
|
61 fResourceList.head()->abandon(); |
|
62 } |
|
63 |
|
64 SkASSERT(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid()); |
|
65 SkSafeSetNull(fQuadIndexBuffer); |
|
66 delete fVertexPool; |
|
67 fVertexPool = NULL; |
|
68 delete fIndexPool; |
|
69 fIndexPool = NULL; |
|
70 } |
|
71 |
|
72 void GrGpu::releaseResources() { |
|
73 |
|
74 fClipMaskManager.releaseResources(); |
|
75 |
|
76 while (NULL != fResourceList.head()) { |
|
77 fResourceList.head()->release(); |
|
78 } |
|
79 |
|
80 SkASSERT(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid()); |
|
81 SkSafeSetNull(fQuadIndexBuffer); |
|
82 delete fVertexPool; |
|
83 fVertexPool = NULL; |
|
84 delete fIndexPool; |
|
85 fIndexPool = NULL; |
|
86 } |
|
87 |
|
// Registers a resource with this GPU's tracking list so it can be
// released/abandoned en masse. The resource must belong to this GrGpu.
void GrGpu::insertResource(GrResource* resource) {
    SkASSERT(NULL != resource);
    SkASSERT(this == resource->getGpu());

    fResourceList.addToHead(resource);
}
|
94 |
|
// Unregisters a resource from this GPU's tracking list (counterpart to
// insertResource). The resource must belong to this GrGpu.
void GrGpu::removeResource(GrResource* resource) {
    SkASSERT(NULL != resource);
    SkASSERT(this == resource->getGpu());

    fResourceList.remove(resource);
}
|
101 |
|
102 |
|
// Debug aid: logs that an optional backend entry point is unimplemented.
// No-op in release builds.
void GrGpu::unimpl(const char msg[]) {
#ifdef SK_DEBUG
    GrPrintf("--- GrGpu unimplemented(\"%s\")\n", msg);
#endif
}
|
108 |
|
109 //////////////////////////////////////////////////////////////////////////////// |
|
110 |
|
// Creates a texture (optionally usable as a render target) after validating
// the descriptor against backend capabilities. Returns NULL on any failure.
// srcData/rowBytes optionally provide initial pixel contents.
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
                                const void* srcData, size_t rowBytes) {
    if (kUnknown_GrPixelConfig == desc.fConfig) {
        return NULL;
    }
    // Render-target textures require the config to be renderable; MSAA
    // renderability is checked separately via desc.fSampleCnt.
    if ((desc.fFlags & kRenderTarget_GrTextureFlagBit) &&
        !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    this->handleDirtyContext();
    GrTexture* tex = this->onCreateTexture(desc, srcData, rowBytes);
    // Render-target textures get a stencil buffer attached eagerly unless
    // the caller opted out with kNoStencil.
    if (NULL != tex &&
        (kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
        !(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
        SkASSERT(NULL != tex->asRenderTarget());
        // TODO: defer this and attach dynamically
        if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
            // Failing to attach stencil is treated as a whole-texture failure.
            tex->unref();
            return NULL;
        }
    }
    return tex;
}
|
135 |
|
// Attaches a stencil buffer to 'rt', preferring a cached one of matching
// dimensions/sample count from the context before creating a new one.
// Returns true on success. The RT must not already have a stencil buffer.
bool GrGpu::attachStencilBufferToRenderTarget(GrRenderTarget* rt) {
    SkASSERT(NULL == rt->getStencilBuffer());
    // First try to recycle a cached stencil buffer of the right size.
    GrStencilBuffer* sb =
        this->getContext()->findStencilBuffer(rt->width(),
                                              rt->height(),
                                              rt->numSamples());
    if (NULL != sb) {
        rt->setStencilBuffer(sb);
        // Backend-specific attach may still fail; undo on failure.
        bool attached = this->attachStencilBufferToRenderTarget(sb, rt);
        if (!attached) {
            rt->setStencilBuffer(NULL);
        }
        return attached;
    }
    // No cached buffer: create (and attach) a fresh one.
    if (this->createStencilBufferForRenderTarget(rt,
                                                 rt->width(), rt->height())) {
        // Right now we're clearing the stencil buffer here after it is
        // attached to an RT for the first time. When we start matching
        // stencil buffers with smaller color targets this will no longer
        // be correct because it won't be guaranteed to clear the entire
        // sb.
        // We used to clear down in the GL subclass using a special purpose
        // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
        // FBO status.
        GrDrawState::AutoRenderTargetRestore artr(this->drawState(), rt);
        this->clearStencil();
        return true;
    } else {
        return false;
    }
}
|
167 |
|
168 GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc) { |
|
169 this->handleDirtyContext(); |
|
170 GrTexture* tex = this->onWrapBackendTexture(desc); |
|
171 if (NULL == tex) { |
|
172 return NULL; |
|
173 } |
|
174 // TODO: defer this and attach dynamically |
|
175 GrRenderTarget* tgt = tex->asRenderTarget(); |
|
176 if (NULL != tgt && |
|
177 !this->attachStencilBufferToRenderTarget(tgt)) { |
|
178 tex->unref(); |
|
179 return NULL; |
|
180 } else { |
|
181 return tex; |
|
182 } |
|
183 } |
|
184 |
|
// Wraps a client-created backend render target object in a GrRenderTarget.
GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    this->handleDirtyContext();
    return this->onWrapBackendRenderTarget(desc);
}
|
189 |
|
// Creates a vertex buffer of 'size' bytes; 'dynamic' hints at frequent
// updates. Flushes any pending context reset first.
GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateVertexBuffer(size, dynamic);
}
|
194 |
|
// Creates an index buffer of 'size' bytes; 'dynamic' hints at frequent
// updates. Flushes any pending context reset first.
GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateIndexBuffer(size, dynamic);
}
|
199 |
|
// Creates a backend path object for NV_path_rendering-style drawing.
// Only valid when the backend reports path rendering support.
GrPath* GrGpu::createPath(const SkPath& path, const SkStrokeRec& stroke) {
    SkASSERT(this->caps()->pathRenderingSupport());
    this->handleDirtyContext();
    return this->onCreatePath(path, stroke);
}
|
205 |
|
// Clears 'rect' (NULL means the whole target) to 'color'. If 'renderTarget'
// is non-NULL it temporarily replaces the draw state's target for the clear;
// otherwise the current draw-state target is used and must be set.
void GrGpu::clear(const SkIRect* rect,
                  GrColor color,
                  bool canIgnoreRect,
                  GrRenderTarget* renderTarget) {
    // RAII restore: the original render target comes back when 'art' dies.
    GrDrawState::AutoRenderTargetRestore art;
    if (NULL != renderTarget) {
        art.set(this->drawState(), renderTarget);
    }
    if (NULL == this->getDrawState().getRenderTarget()) {
        // Nothing to clear into — a caller bug.
        SkASSERT(0);
        return;
    }
    this->handleDirtyContext();
    this->onClear(rect, color, canIgnoreRect);
}
|
221 |
|
// Forces pending rendering to the current render target to be flushed
// to the backend.
void GrGpu::forceRenderTargetFlush() {
    this->handleDirtyContext();
    this->onForceRenderTargetFlush();
}
|
226 |
|
// Reads back a rectangle of pixels from 'target' into 'buffer' using the
// given config and rowBytes (0 rowBytes semantics are backend-defined —
// see the subclass). Returns false on failure.
bool GrGpu::readPixels(GrRenderTarget* target,
                       int left, int top, int width, int height,
                       GrPixelConfig config, void* buffer,
                       size_t rowBytes) {
    this->handleDirtyContext();
    return this->onReadPixels(target, left, top, width, height,
                              config, buffer, rowBytes);
}
|
235 |
|
// Uploads a rectangle of pixels from 'buffer' into 'texture' using the
// given config and rowBytes. Returns false on failure.
bool GrGpu::writeTexturePixels(GrTexture* texture,
                               int left, int top, int width, int height,
                               GrPixelConfig config, const void* buffer,
                               size_t rowBytes) {
    this->handleDirtyContext();
    return this->onWriteTexturePixels(texture, left, top, width, height,
                                      config, buffer, rowBytes);
}
|
244 |
|
// Resolves a multisampled render target so its contents can be sampled.
void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}
|
250 |
|
// Stencil settings for winding-fill path rendering: increment (clamped) on
// both pass and fail, writing the full 16-bit mask, restricted to the clip.
static const GrStencilSettings& winding_path_stencil_settings() {
    GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
        kIncClamp_StencilOp,
        kIncClamp_StencilOp,
        kAlwaysIfInClip_StencilFunc,
        0xFFFF, 0xFFFF, 0xFFFF);
    return *GR_CONST_STENCIL_SETTINGS_PTR_FROM_STRUCT_PTR(&gSettings);
}
|
259 |
|
// Stencil settings for even-odd-fill path rendering: invert the stencil on
// both pass and fail, writing the full 16-bit mask, restricted to the clip.
static const GrStencilSettings& even_odd_path_stencil_settings() {
    GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
        kInvert_StencilOp,
        kInvert_StencilOp,
        kAlwaysIfInClip_StencilFunc,
        0xFFFF, 0xFFFF, 0xFFFF);
    return *GR_CONST_STENCIL_SETTINGS_PTR_FROM_STRUCT_PTR(&gSettings);
}
|
268 |
|
// Selects the stencil settings for path rendering with the given fill type
// (winding vs. even-odd; inverse variants share settings with their
// non-inverse counterparts) and lets the clip mask manager adjust them.
void GrGpu::getPathStencilSettingsForFillType(SkPath::FillType fill, GrStencilSettings* outStencilSettings) {

    switch (fill) {
        default:
            // Unknown fill types are a programming error; fall into the
            // winding case after crashing in debug builds.
            GrCrash("Unexpected path fill.");
            /* fallthrough */;
        case SkPath::kWinding_FillType:
        case SkPath::kInverseWinding_FillType:
            *outStencilSettings = winding_path_stencil_settings();
            break;
        case SkPath::kEvenOdd_FillType:
        case SkPath::kInverseEvenOdd_FillType:
            *outStencilSettings = even_odd_path_stencil_settings();
            break;
    }
    fClipMaskManager.adjustPathStencilParams(outStencilSettings);
}
|
286 |
|
287 |
|
288 //////////////////////////////////////////////////////////////////////////////// |
|
289 |
|
// Number of quads the shared quad index buffer covers. Each quad uses 4
// vertices and 6 uint16_t indices, so 4 * MAX_QUADS must fit in 16 bits.
static const int MAX_QUADS = 1 << 12; // max possible: (1 << 14) - 1;

GR_STATIC_ASSERT(4 * MAX_QUADS <= 65535);
|
293 |
|
// Writes the index pattern for 'quadCount' quads into 'indices': each quad
// (4 vertices) becomes two triangles, {0,1,2} and {0,2,3}, offset by the
// quad's base vertex. The buffer must hold 6 * quadCount entries.
static inline void fill_indices(uint16_t* indices, int quadCount) {
    for (int q = 0; q < quadCount; ++q) {
        uint16_t* tri = indices + 6 * q;
        uint16_t base = (uint16_t)(4 * q);
        tri[0] = base;
        tri[1] = (uint16_t)(base + 1);
        tri[2] = (uint16_t)(base + 2);
        tri[3] = base;
        tri[4] = (uint16_t)(base + 2);
        tri[5] = (uint16_t)(base + 3);
    }
}
|
304 |
|
// Returns a lazily created, shared index buffer describing MAX_QUADS quads
// (two triangles each). Logically const: the cached member is mutated
// through a const_cast on first use. May return NULL on allocation failure.
const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer) {
        static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
        if (NULL != fQuadIndexBuffer) {
            // Preferred path: fill the buffer through a mapping.
            uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
            if (NULL != indices) {
                fill_indices(indices, MAX_QUADS);
                fQuadIndexBuffer->unlock();
            } else {
                // Mapping failed: build the data CPU-side and upload it.
                indices = (uint16_t*)sk_malloc_throw(SIZE);
                fill_indices(indices, MAX_QUADS);
                if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
                    fQuadIndexBuffer->unref();
                    fQuadIndexBuffer = NULL;
                    GrCrash("Can't get indices into buffer!");
                }
                sk_free(indices);
            }
        }
    }

    return fQuadIndexBuffer;
}
|
330 |
|
331 //////////////////////////////////////////////////////////////////////////////// |
|
332 |
|
333 bool GrGpu::setupClipAndFlushState(DrawType type, const GrDeviceCoordTexture* dstCopy, |
|
334 GrDrawState::AutoRestoreEffects* are, |
|
335 const SkRect* devBounds) { |
|
336 if (!fClipMaskManager.setupClipping(this->getClip(), are, devBounds)) { |
|
337 return false; |
|
338 } |
|
339 |
|
340 if (!this->flushGraphicsState(type, dstCopy)) { |
|
341 return false; |
|
342 } |
|
343 |
|
344 return true; |
|
345 } |
|
346 |
|
347 //////////////////////////////////////////////////////////////////////////////// |
|
348 |
|
// Called before a new geometry source is pushed: finalizes any pool-backed
// (reserved or array) vertex/index data for the current source, then pushes
// a fresh pool-state entry for the new source.
void GrGpu::geometrySourceWillPush() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    if (kArray_GeometrySrcType == geoSrc.fVertexSrc ||
        kReserved_GeometrySrcType == geoSrc.fVertexSrc) {
        this->finalizeReservedVertices();
    }
    if (kArray_GeometrySrcType == geoSrc.fIndexSrc ||
        kReserved_GeometrySrcType == geoSrc.fIndexSrc) {
        this->finalizeReservedIndices();
    }
    GeometryPoolState& newState = fGeomPoolStateStack.push_back();
#ifdef SK_DEBUG
    // Poison the new entry so use-before-set trips asserts.
    newState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    newState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#else
    (void) newState; // silence compiler warning
#endif
}
|
369 |
|
// Called when a geometry source is popped; discards the matching pool-state
// entry. 'restoredState' is unused here (the base class handles restoring
// the source itself).
void GrGpu::geometrySourceWillPop(const GeometrySrcState& restoredState) {
    // if popping last entry then pops are unbalanced with pushes
    SkASSERT(fGeomPoolStateStack.count() > 1);
    fGeomPoolStateStack.pop_back();
}
|
375 |
|
// Executes a geometry draw: sets up clipping and flushes state, then hands
// the draw to the backend. Silently skips the draw if setup fails.
void GrGpu::onDraw(const DrawInfo& info) {
    this->handleDirtyContext();
    GrDrawState::AutoRestoreEffects are;
    if (!this->setupClipAndFlushState(PrimTypeToDrawType(info.primitiveType()),
                                      info.getDstCopy(), &are, info.getDevBounds())) {
        return;
    }
    this->onGpuDraw(info);
}
|
385 |
|
// Stencils a path (writes coverage into the stencil buffer only). Skips the
// operation if clip/state setup fails.
void GrGpu::onStencilPath(const GrPath* path, SkPath::FillType fill) {
    this->handleDirtyContext();

    GrDrawState::AutoRestoreEffects are;
    if (!this->setupClipAndFlushState(kStencilPath_DrawType, NULL, &are, NULL)) {
        return;
    }

    this->onGpuStencilPath(path, fill);
}
|
396 |
|
397 |
|
398 void GrGpu::onDrawPath(const GrPath* path, SkPath::FillType fill, |
|
399 const GrDeviceCoordTexture* dstCopy) { |
|
400 this->handleDirtyContext(); |
|
401 |
|
402 drawState()->setDefaultVertexAttribs(); |
|
403 |
|
404 GrDrawState::AutoRestoreEffects are; |
|
405 if (!this->setupClipAndFlushState(kDrawPath_DrawType, dstCopy, &are, NULL)) { |
|
406 return; |
|
407 } |
|
408 |
|
409 this->onGpuDrawPath(path, fill); |
|
410 } |
|
411 |
|
// Unlocks the vertex pool so its data can be used for drawing. Only valid
// once the pool has been created (i.e. space was reserved/appended).
void GrGpu::finalizeReservedVertices() {
    SkASSERT(NULL != fVertexPool);
    fVertexPool->unlock();
}
|
416 |
|
// Unlocks the index pool so its data can be used for drawing. Only valid
// once the pool has been created (i.e. space was reserved/appended).
void GrGpu::finalizeReservedIndices() {
    SkASSERT(NULL != fIndexPool);
    fIndexPool->unlock();
}
|
421 |
|
// Ensures the vertex pool exists and, when no client data is outstanding
// (use count 0), resets it so stale data isn't kept alive.
void GrGpu::prepareVertexPool() {
    if (NULL == fVertexPool) {
        SkASSERT(0 == fVertexPoolUseCnt);
        fVertexPool = SkNEW_ARGS(GrVertexBufferAllocPool, (this, true,
                                                           VERTEX_POOL_VB_SIZE,
                                                           VERTEX_POOL_VB_COUNT));
        // Drop the pool's ref on this GrGpu to avoid a ref cycle.
        fVertexPool->releaseGpuRef();
    } else if (!fVertexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fVertexPool->reset();
    }
}
|
434 |
|
// Ensures the index pool exists and, when no client data is outstanding
// (use count 0), resets it so stale data isn't kept alive.
void GrGpu::prepareIndexPool() {
    if (NULL == fIndexPool) {
        SkASSERT(0 == fIndexPoolUseCnt);
        fIndexPool = SkNEW_ARGS(GrIndexBufferAllocPool, (this, true,
                                                         INDEX_POOL_IB_SIZE,
                                                         INDEX_POOL_IB_COUNT));
        // Drop the pool's ref on this GrGpu to avoid a ref cycle.
        fIndexPool->releaseGpuRef();
    } else if (!fIndexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fIndexPool->reset();
    }
}
|
447 |
|
// Reserves space for 'vertexCount' vertices of 'vertexSize' bytes each in
// the vertex pool. On success, *vertices points at writable space, the
// current pool state records the backing buffer/start vertex, and the pool
// use count is bumped. Returns false if the pool can't satisfy the request.
bool GrGpu::onReserveVertexSpace(size_t vertexSize,
                                 int vertexCount,
                                 void** vertices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    SkASSERT(vertexCount > 0);
    SkASSERT(NULL != vertices);

    this->prepareVertexPool();

    *vertices = fVertexPool->makeSpace(vertexSize,
                                       vertexCount,
                                       &geomPoolState.fPoolVertexBuffer,
                                       &geomPoolState.fPoolStartVertex);
    if (NULL == *vertices) {
        return false;
    }
    ++fVertexPoolUseCnt;
    return true;
}
|
468 |
|
// Reserves space for 'indexCount' 16-bit indices in the index pool. On
// success, *indices points at writable space, the current pool state records
// the backing buffer/start index, and the pool use count is bumped. Returns
// false if the pool can't satisfy the request.
bool GrGpu::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    SkASSERT(indexCount > 0);
    SkASSERT(NULL != indices);

    this->prepareIndexPool();

    *indices = fIndexPool->makeSpace(indexCount,
                                     &geomPoolState.fPoolIndexBuffer,
                                     &geomPoolState.fPoolStartIndex);
    if (NULL == *indices) {
        return false;
    }
    ++fIndexPoolUseCnt;
    return true;
}
|
486 |
|
487 void GrGpu::releaseReservedVertexSpace() { |
|
488 const GeometrySrcState& geoSrc = this->getGeomSrc(); |
|
489 SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc); |
|
490 size_t bytes = geoSrc.fVertexCount * geoSrc.fVertexSize; |
|
491 fVertexPool->putBack(bytes); |
|
492 --fVertexPoolUseCnt; |
|
493 } |
|
494 |
|
495 void GrGpu::releaseReservedIndexSpace() { |
|
496 const GeometrySrcState& geoSrc = this->getGeomSrc(); |
|
497 SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc); |
|
498 size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t); |
|
499 fIndexPool->putBack(bytes); |
|
500 --fIndexPoolUseCnt; |
|
501 } |
|
502 |
|
// Copies a client vertex array into the vertex pool, recording the backing
// buffer/start vertex in the current pool state. The #ifdef dance keeps the
// 'success' variable (used only by the debug assert) out of release builds.
void GrGpu::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
    this->prepareVertexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool->appendVertices(this->getVertexSize(),
                                vertexCount,
                                vertexArray,
                                &geomPoolState.fPoolVertexBuffer,
                                &geomPoolState.fPoolStartVertex);
    ++fVertexPoolUseCnt;
    GR_DEBUGASSERT(success);
}
|
517 |
|
// Copies a client index array into the index pool, recording the backing
// buffer/start index in the current pool state. The #ifdef dance keeps the
// 'success' variable (used only by the debug assert) out of release builds.
void GrGpu::onSetIndexSourceToArray(const void* indexArray, int indexCount) {
    this->prepareIndexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool->appendIndices(indexCount,
                              indexArray,
                              &geomPoolState.fPoolIndexBuffer,
                              &geomPoolState.fPoolStartIndex);
    ++fIndexPoolUseCnt;
    GR_DEBUGASSERT(success);
}
|
531 |
|
532 void GrGpu::releaseVertexArray() { |
|
533 // if vertex source was array, we stowed data in the pool |
|
534 const GeometrySrcState& geoSrc = this->getGeomSrc(); |
|
535 SkASSERT(kArray_GeometrySrcType == geoSrc.fVertexSrc); |
|
536 size_t bytes = geoSrc.fVertexCount * geoSrc.fVertexSize; |
|
537 fVertexPool->putBack(bytes); |
|
538 --fVertexPoolUseCnt; |
|
539 } |
|
540 |
|
541 void GrGpu::releaseIndexArray() { |
|
542 // if index source was array, we stowed data in the pool |
|
543 const GeometrySrcState& geoSrc = this->getGeomSrc(); |
|
544 SkASSERT(kArray_GeometrySrcType == geoSrc.fIndexSrc); |
|
545 size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t); |
|
546 fIndexPool->putBack(bytes); |
|
547 --fIndexPoolUseCnt; |
|
548 } |