gfx/skia/trunk/src/gpu/GrBufferAllocPool.cpp

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Sat, 03 Jan 2015 20:18:00 +0100
branch      TOR_BUG_3246
changeset   7:129ffea94266
permissions -rw-r--r--

Conditionally enable double-key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.


/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     bool frequentResetHint,
                                     size_t blockSize,
                                     int preallocBufferCnt) :
        fBlocks(GrMax(8, 2*preallocBufferCnt)) {

    SkASSERT(NULL != gpu);
    fGpu = gpu;
    fGpu->ref();
    fGpuIsReffed = true;

    fBufferType = bufferType;
    fFrequentResetHint = frequentResetHint;
    fBufferPtr = NULL;
    fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fPreallocBuffersInUse = 0;
    fPreallocBufferStartIdx = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (NULL != buffer) {
            *fPreallocBuffers.append() = buffer;
        }
    }
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    releaseGpuRef();
}

void GrBufferAllocPool::releaseGpuRef() {
    if (fGpuIsReffed) {
        fGpu->unref();
        fGpuIsReffed = false;
    }
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    // fPreallocBuffersInUse will be decremented down to zero in the while loop
    int preallocBuffersInUse = fPreallocBuffersInUse;
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // must set this after the above loop.
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    // We may have created a large CPU mirror of a large VB. Reset the size
    // to match our pre-allocated VBs.
    fCpuData.reset(fMinBlockSize);
    SkASSERT(0 == fPreallocBuffersInUse);
    VALIDATE();
}
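
// Usage note (an illustrative sketch, not part of the original file): a
// typical owner is assumed to drain the pool once per draw flush. The name
// "pool" and the helper "issueDraws" below are hypothetical:
//
//   pool.unlock();   // hand any CPU-side staging data back to the GPU buffers
//   issueDraws();    // hypothetical: submit draws referencing the pool's buffers
//   pool.reset();    // recycle all blocks for the next flush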

void GrBufferAllocPool::unlock() {
    VALIDATE();

    if (NULL != fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isLocked()) {
            block.fBuffer->unlock();
        } else {
            size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
            flushCpuData(fBlocks.back().fBuffer, flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    if (NULL != fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->lockPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData.get() == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isLocked());
    }
    for (int i = 0; i < fBlocks.count(); ++i) {
        size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
        bytesInUse += bytes;
        SkASSERT(bytes || unusedBlockAllowed);
    }

    SkASSERT(bytesInUse == fBytesInUse);
    if (unusedBlockAllowed) {
        SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                 (!fBytesInUse && (fBlocks.count() < 2)));
    } else {
        SkASSERT((0 == fBytesInUse) == fBlocks.empty());
    }
}
#endif

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(NULL != buffer);
    SkASSERT(NULL != offset);

    if (NULL != fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes,
                                      alignment);
        if ((size + pad) <= back.fBytesFree) {
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request using a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!createBlock(size)) {
        return NULL;
    }
    SkASSERT(NULL != fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
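
// Example (a hedged sketch, not part of the original file): reserving space
// from the pool and writing into it. "pool" and "srcData" are hypothetical;
// the returned pointer stays writable until unlock() or reset() is called.
//
//   const GrGeometryBuffer* buffer = NULL;
//   size_t offset = 0;
//   void* space = pool.makeSpace(100, 4, &buffer, &offset);
//   if (NULL != space) {
//       memcpy(space, srcData, 100);  // fill the 100 reserved bytes
//       // draw using 'buffer' with byte offset 'offset'
//   }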

int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
    VALIDATE();
    if (NULL != fBufferPtr) {
        const BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
        return static_cast<int>((back.fBytesFree - pad) / itemSize);
    } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
        return static_cast<int>(fMinBlockSize / itemSize);
    }
    return 0;
}

int GrBufferAllocPool::preallocatedBuffersRemaining() const {
    return fPreallocBuffers.count() - fPreallocBuffersInUse;
}

int GrBufferAllocPool::preallocatedBufferCount() const {
    return fPreallocBuffers.count();
}

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    // If the putBack unwinds all the preallocated buffers then we will
    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
    // will be decremented. It will reach zero if all blocks using preallocated
    // buffers are released.
    int preallocBuffersInUse = fPreallocBuffersInUse;

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unlock it.
            if (block.fBuffer->isLocked()) {
                block.fBuffer->unlock();
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }
    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    VALIDATE();
}

bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = GrMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        uint32_t nextBuffer = (fPreallocBuffersInUse +
                               fPreallocBufferStartIdx) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (NULL != fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isLocked()) {
            prev.fBuffer->unlock();
        } else {
            flushCpuData(prev.fBuffer,
                         prev.fBuffer->sizeInBytes() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    SkASSERT(NULL == fBufferPtr);

    // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
    // Otherwise when buffer locking is supported:
    //      a) If the frequently reset hint is set we only lock when the requested size meets a
    //      threshold (since we don't expect it is likely that we will see more vertex data)
    //      b) If the hint is not set we lock if the buffer size is greater than the threshold.
    bool attemptLock = block.fBuffer->isCPUBacked();
    if (!attemptLock && fGpu->caps()->bufferLockSupport()) {
        if (fFrequentResetHint) {
            attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
        } else {
            attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
        }
    }

    if (attemptLock) {
        fBufferPtr = block.fBuffer->lock();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.reset(size);
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fPreallocBufferStartIdx +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    SkASSERT(!block.fBuffer->isLocked());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}

void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
                                     size_t flushSize) {
    SkASSERT(NULL != buffer);
    SkASSERT(!buffer->isLocked());
    SkASSERT(fCpuData.get() == fBufferPtr);
    SkASSERT(flushSize <= buffer->sizeInBytes());
    VALIDATE(true);

    if (fGpu->caps()->bufferLockSupport() &&
        flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
        void* data = buffer->lock();
        if (NULL != data) {
            memcpy(data, fBufferPtr, flushSize);
            buffer->unlock();
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
    if (kIndex_BufferType == fBufferType) {
        return fGpu->createIndexBuffer(size, true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
        return fGpu->createVertexBuffer(size, true);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 bool frequentResetHint,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kVertex_BufferType,
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
}
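
// Example (a hedged sketch, not part of the original file): constructing a
// vertex pool that prefers 32KB blocks and keeps four preallocated buffers.
// "gpu" is a hypothetical GrGpu instance owned by the caller.
//
//   GrVertexBufferAllocPool vertexPool(gpu,
//                                      true,     // expect frequent resets
//                                      1 << 15,  // 32KB preferred block size
//                                      4);       // preallocated buffer count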

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(NULL != buffer);
    SkASSERT(NULL != startVertex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
                                             int vertexCount,
                                             const void* vertices,
                                             const GrVertexBuffer** buffer,
                                             int* startVertex) {
    void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
    if (NULL != space) {
        memcpy(space,
               vertices,
               vertexSize * vertexCount);
        return true;
    } else {
        return false;
    }
}
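
// Example (a hedged sketch, not part of the original file): appending four
// positions for a quad. "pool", "kQuad", and the draw step are hypothetical.
//
//   static const GrPoint kQuad[4] = { /* four positions */ };
//   const GrVertexBuffer* vb = NULL;
//   int startVertex = 0;
//   if (pool.appendVertices(sizeof(GrPoint), 4, kQuad, &vb, &startVertex)) {
//       // draw 4 vertices from vb beginning at startVertex
//   }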

int GrVertexBufferAllocPool::preallocatedBufferVertices(size_t vertexSize) const {
    return static_cast<int>(INHERITED::preallocatedBufferSize() / vertexSize);
}

int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const {
    return currentBufferItems(vertexSize);
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               bool frequentResetHint,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kIndex_BufferType,
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(NULL != buffer);
    SkASSERT(NULL != startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                           const void* indices,
                                           const GrIndexBuffer** buffer,
                                           int* startIndex) {
    void* space = makeSpace(indexCount, buffer, startIndex);
    if (NULL != space) {
        memcpy(space, indices, sizeof(uint16_t) * indexCount);
        return true;
    } else {
        return false;
    }
}
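
// Example (a hedged sketch, not part of the original file): appending the six
// 16-bit indices of a two-triangle quad. "pool" is hypothetical.
//
//   static const uint16_t kQuadIndices[] = { 0, 1, 2, 0, 2, 3 };
//   const GrIndexBuffer* ib = NULL;
//   int startIndex = 0;
//   if (pool.appendIndices(6, kQuadIndices, &ib, &startIndex)) {
//       // draw 6 indices from ib beginning at startIndex
//   }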

int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
    return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t));
}

int GrIndexBufferAllocPool::currentBufferIndices() const {
    return currentBufferItems(sizeof(uint16_t));
}
