/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     bool frequentResetHint,
                                     size_t blockSize,
                                     int preallocBufferCnt) :
        fBlocks(GrMax(8, 2*preallocBufferCnt)) {

    SkASSERT(NULL != gpu);
    fGpu = gpu;
    fGpu->ref();
    fGpuIsReffed = true;

    fBufferType = bufferType;
    fFrequentResetHint = frequentResetHint;
    fBufferPtr = NULL;
    fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fPreallocBuffersInUse = 0;
    fPreallocBufferStartIdx = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (NULL != buffer) {
            *fPreallocBuffers.append() = buffer;
        }
    }
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    releaseGpuRef();
}

void GrBufferAllocPool::releaseGpuRef() {
    if (fGpuIsReffed) {
        fGpu->unref();
        fGpuIsReffed = false;
    }
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    // fPreallocBuffersInUse will be decremented down to zero in the while loop
    int preallocBuffersInUse = fPreallocBuffersInUse;
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // must set this after above loop.
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    // we may have created a large cpu mirror of a large VB. Reset the size
    // to match our pre-allocated VBs.
    fCpuData.reset(fMinBlockSize);
    SkASSERT(0 == fPreallocBuffersInUse);
    VALIDATE();
}
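// Worked example of the ring-index update above (illustrative numbers, not
// taken from the original code): with fPreallocBuffers.count() == 3,
// fPreallocBufferStartIdx == 1, and preallocBuffersInUse == 2, the new start
// index is (1 + 2) % 3 == 0. The pool therefore hands out its preallocated
// buffers round-robin across resets rather than always reusing the same one,
// presumably so a buffer the GPU may still be reading gets more time before
// it is re-locked.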
void GrBufferAllocPool::unlock() {
    VALIDATE();

    if (NULL != fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isLocked()) {
            block.fBuffer->unlock();
        } else {
            size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
            flushCpuData(fBlocks.back().fBuffer, flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    if (NULL != fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->lockPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData.get() == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isLocked());
    }
    for (int i = 0; i < fBlocks.count(); ++i) {
        size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
        bytesInUse += bytes;
        SkASSERT(bytes || unusedBlockAllowed);
    }

    SkASSERT(bytesInUse == fBytesInUse);
    if (unusedBlockAllowed) {
        SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                 (!fBytesInUse && (fBlocks.count() < 2)));
    } else {
        SkASSERT((0 == fBytesInUse) == fBlocks.empty());
    }
}
#endif

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(NULL != buffer);
    SkASSERT(NULL != offset);

    if (NULL != fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes,
                                      alignment);
        if ((size + pad) <= back.fBytesFree) {
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request using a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!createBlock(size)) {
        return NULL;
    }
    SkASSERT(NULL != fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
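// Worked example of the padding math in makeSpace (illustrative values, not
// from the original file): if the current block already holds usedBytes == 10
// and the caller asks for alignment == 4, GrSizeAlignUpPad(10, 4) yields
// pad == 2, so the returned *offset is 12 and (size + 2) bytes are consumed
// from the block. When size + pad exceeds back.fBytesFree, the request falls
// through to createBlock() and is satisfied at offset 0 of a fresh block.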
int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
    VALIDATE();
    if (NULL != fBufferPtr) {
        const BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
        return static_cast<int>((back.fBytesFree - pad) / itemSize);
    } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
        return static_cast<int>(fMinBlockSize / itemSize);
    }
    return 0;
}

int GrBufferAllocPool::preallocatedBuffersRemaining() const {
    return fPreallocBuffers.count() - fPreallocBuffersInUse;
}

int GrBufferAllocPool::preallocatedBufferCount() const {
    return fPreallocBuffers.count();
}

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    // if the putBack unwinds all the preallocated buffers then we will
    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
    // will be decremented. It will reach zero if all blocks using preallocated
    // buffers are released.
    int preallocBuffersInUse = fPreallocBuffersInUse;

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unlock it.
            if (block.fBuffer->isLocked()) {
                block.fBuffer->unlock();
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }
    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    VALIDATE();
}
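// A minimal caller-side sketch of the reserve/put-back protocol (hypothetical
// caller; 'pool' and the byte counts are illustrative, not from this file):
//
//     const GrGeometryBuffer* buffer;
//     size_t offset;
//     void* ptr = pool.makeSpace(1024, 4, &buffer, &offset);  // reserve 1024 bytes
//     size_t written = 600;                                   // wrote fewer than reserved
//     pool.putBack(1024 - written);                           // return the unused 424 bytes
//
// putBack() unwinds the most recently allocated bytes first, destroying whole
// blocks when a put-back spans them, which is why callers must never return
// more bytes than they have taken.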
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = GrMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        uint32_t nextBuffer = (fPreallocBuffersInUse +
                               fPreallocBufferStartIdx) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (NULL != fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isLocked()) {
            prev.fBuffer->unlock();
        } else {
            flushCpuData(prev.fBuffer,
                         prev.fBuffer->sizeInBytes() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    SkASSERT(NULL == fBufferPtr);

    // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
    // Otherwise when buffer locking is supported:
    //      a) If the frequently reset hint is set we only lock when the requested size meets a
    //      threshold (since we don't expect it is likely that we will see more vertex data)
    //      b) If the hint is not set we lock if the buffer size is greater than the threshold.
    bool attemptLock = block.fBuffer->isCPUBacked();
    if (!attemptLock && fGpu->caps()->bufferLockSupport()) {
        if (fFrequentResetHint) {
            attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
        } else {
            attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
        }
    }

    if (attemptLock) {
        fBufferPtr = block.fBuffer->lock();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.reset(size);
    }

    VALIDATE(true);

    return true;
}
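// Summary of the lock decision above, with an illustrative number (the actual
// GR_GEOM_BUFFER_LOCK_THRESHOLD value is defined elsewhere): assuming a
// threshold of, say, 1 << 15, a 4KB block in a frequently-reset pool is not
// locked; writes instead land in the fCpuData mirror and reach the GPU buffer
// later via flushCpuData(). A CPU-backed buffer is always locked since that
// is just a pointer handout, and a sufficiently large GPU buffer is locked to
// avoid the extra CPU-side copy.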
void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fPreallocBufferStartIdx +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    SkASSERT(!block.fBuffer->isLocked());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}

void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
                                     size_t flushSize) {
    SkASSERT(NULL != buffer);
    SkASSERT(!buffer->isLocked());
    SkASSERT(fCpuData.get() == fBufferPtr);
    SkASSERT(flushSize <= buffer->sizeInBytes());
    VALIDATE(true);

    if (fGpu->caps()->bufferLockSupport() &&
        flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
        void* data = buffer->lock();
        if (NULL != data) {
            memcpy(data, fBufferPtr, flushSize);
            buffer->unlock();
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}
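// Note on the two flush paths above: large flushes go through lock()/memcpy/
// unlock(), reusing the same GR_GEOM_BUFFER_LOCK_THRESHOLD cutoff as
// createBlock(), while small flushes use updateData(). If lock() fails
// (returns NULL), the code falls back to updateData() rather than dropping
// the data.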
GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
    if (kIndex_BufferType == fBufferType) {
        return fGpu->createIndexBuffer(size, true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
        return fGpu->createVertexBuffer(size, true);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 bool frequentResetHint,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kVertex_BufferType,
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(NULL != buffer);
    SkASSERT(NULL != startVertex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
                                             int vertexCount,
                                             const void* vertices,
                                             const GrVertexBuffer** buffer,
                                             int* startVertex) {
    void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
    if (NULL != space) {
        memcpy(space,
               vertices,
               vertexSize * vertexCount);
        return true;
    } else {
        return false;
    }
}

int GrVertexBufferAllocPool::preallocatedBufferVertices(size_t vertexSize) const {
    return static_cast<int>(INHERITED::preallocatedBufferSize() / vertexSize);
}

int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const {
    return currentBufferItems(vertexSize);
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               bool frequentResetHint,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kIndex_BufferType,
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(NULL != buffer);
    SkASSERT(NULL != startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                           const void* indices,
                                           const GrIndexBuffer** buffer,
                                           int* startIndex) {
    void* space = makeSpace(indexCount, buffer, startIndex);
    if (NULL != space) {
        memcpy(space, indices, sizeof(uint16_t) * indexCount);
        return true;
    } else {
        return false;
    }
}

int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
    return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t));
}

int GrIndexBufferAllocPool::currentBufferIndices() const {
    return currentBufferItems(sizeof(uint16_t));
}
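////////////////////////////////////////////////////////////////////////////////

#if 0
// A minimal end-to-end usage sketch (illustrative only, not part of the
// original file). 'gpu' is assumed to be a live GrGpu; the vertex layout and
// counts are hypothetical.
static void exampleUsage(GrGpu* gpu) {
    // Pool with the frequent-reset hint, default block size, 2 preallocated buffers.
    GrVertexBufferAllocPool vertexPool(gpu, true, 0, 2);

    static const int kVertCount = 4;
    static const size_t kVertSize = 2 * sizeof(float);  // x,y per vertex

    const GrVertexBuffer* vb = NULL;
    int startVertex = 0;
    void* verts = vertexPool.makeSpace(kVertSize, kVertCount, &vb, &startVertex);
    if (NULL != verts) {
        memset(verts, 0, kVertSize * kVertCount);  // write vertex data here
    }

    // Before drawing from 'vb', the pool must be unlocked so any CPU-side
    // mirror is flushed to the GPU buffer.
    vertexPool.unlock();

    // Between frames (or batches) the pool is reset and its preallocated
    // buffers are recycled round-robin.
    vertexPool.reset();
}
#endif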