gfx/skia/trunk/src/gpu/GrBufferAllocPool.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts, with some
reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double key logic is disabled.
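
As a rough illustration of the gating this changeset describes (a minimal
sketch, not the actual patch; the helper name and call-site behavior are
hypothetical):

    #include "mozilla/Preferences.h"

    // Hypothetical sketch: double keying is active only in private
    // browsing, or everywhere when the isolation pref is set.
    static bool DoubleKeyingEnabled(bool aIsPrivate) {
        return aIsPrivate ||
               mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }

    // A FindCookie caller would then pass the first-party base domain only
    // when double keying is enabled, and nullptr otherwise, so lookups fall
    // back to single-keyed behavior.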

/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     bool frequentResetHint,
                                     size_t blockSize,
                                     int preallocBufferCnt) :
        fBlocks(GrMax(8, 2*preallocBufferCnt)) {

    SkASSERT(NULL != gpu);
    fGpu = gpu;
    fGpu->ref();
    fGpuIsReffed = true;

    fBufferType = bufferType;
    fFrequentResetHint = frequentResetHint;
    fBufferPtr = NULL;
    fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fPreallocBuffersInUse = 0;
    fPreallocBufferStartIdx = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (NULL != buffer) {
            *fPreallocBuffers.append() = buffer;
        }
    }
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    releaseGpuRef();
}

void GrBufferAllocPool::releaseGpuRef() {
    if (fGpuIsReffed) {
        fGpu->unref();
        fGpuIsReffed = false;
    }
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    // fPreallocBuffersInUse will be decremented down to zero in the while loop
    int preallocBuffersInUse = fPreallocBuffersInUse;
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // must set this after above loop.
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    // we may have created a large cpu mirror of a large VB. Reset the size
    // to match our pre-allocated VBs.
    fCpuData.reset(fMinBlockSize);
    SkASSERT(0 == fPreallocBuffersInUse);
    VALIDATE();
}

void GrBufferAllocPool::unlock() {
    VALIDATE();

    if (NULL != fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isLocked()) {
            block.fBuffer->unlock();
        } else {
            size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
            flushCpuData(fBlocks.back().fBuffer, flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    if (NULL != fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->lockPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData.get() == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isLocked());
    }
    for (int i = 0; i < fBlocks.count(); ++i) {
        size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
        bytesInUse += bytes;
        SkASSERT(bytes || unusedBlockAllowed);
    }

    SkASSERT(bytesInUse == fBytesInUse);
    if (unusedBlockAllowed) {
        SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                 (!fBytesInUse && (fBlocks.count() < 2)));
    } else {
        SkASSERT((0 == fBytesInUse) == fBlocks.empty());
    }
}
#endif

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(NULL != buffer);
    SkASSERT(NULL != offset);
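
    // First try to satisfy the request from the tail of the current block,
    // padding the used byte count up to the requested alignment.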
    if (NULL != fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes,
                                      alignment);
        if ((size + pad) <= back.fBytesFree) {
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!createBlock(size)) {
        return NULL;
    }
    SkASSERT(NULL != fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
    VALIDATE();
    if (NULL != fBufferPtr) {
        const BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
        return static_cast<int>((back.fBytesFree - pad) / itemSize);
    } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
        return static_cast<int>(fMinBlockSize / itemSize);
    }
    return 0;
}

int GrBufferAllocPool::preallocatedBuffersRemaining() const {
    return fPreallocBuffers.count() - fPreallocBuffersInUse;
}

int GrBufferAllocPool::preallocatedBufferCount() const {
    return fPreallocBuffers.count();
}

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    // If the putBack unwinds all the preallocated buffers then we will
    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
    // will be decremented. It will reach zero if all blocks using preallocated
    // buffers are released.
    int preallocBuffersInUse = fPreallocBuffersInUse;

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unlock it.
            if (block.fBuffer->isLocked()) {
                block.fBuffer->unlock();
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }
    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    VALIDATE();
}

bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = GrMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();
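
    // The preallocated buffers form a ring: fPreallocBufferStartIdx is
    // rotated by reset() and putBack(), so each reset starts handing out
    // buffers at the next position in the ring rather than always reusing
    // the same one.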
    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        uint32_t nextBuffer = (fPreallocBuffersInUse +
                               fPreallocBufferStartIdx) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (NULL != fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isLocked()) {
            prev.fBuffer->unlock();
        } else {
            flushCpuData(prev.fBuffer,
                         prev.fBuffer->sizeInBytes() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    SkASSERT(NULL == fBufferPtr);

    // If the buffer is CPU-backed we lock it because doing so is free and
    // saves a copy. Otherwise, when buffer locking is supported:
    //      a) If the frequent-reset hint is set we only lock when the
    //      requested size exceeds a threshold (since more vertex data is
    //      unlikely to follow).
    //      b) If the hint is not set we lock if the buffer size exceeds the
    //      threshold.
    bool attemptLock = block.fBuffer->isCPUBacked();
    if (!attemptLock && fGpu->caps()->bufferLockSupport()) {
        if (fFrequentResetHint) {
            attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
        } else {
            attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
        }
    }

    if (attemptLock) {
        fBufferPtr = block.fBuffer->lock();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.reset(size);
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
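        // Index of the most recently handed-out preallocated buffer: one
        // step back in the ring, with count - 1 standing in for -1 so the
        // modulo operand stays non-negative.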
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fPreallocBufferStartIdx +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    SkASSERT(!block.fBuffer->isLocked());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}

void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
                                     size_t flushSize) {
    SkASSERT(NULL != buffer);
    SkASSERT(!buffer->isLocked());
    SkASSERT(fCpuData.get() == fBufferPtr);
    SkASSERT(flushSize <= buffer->sizeInBytes());
    VALIDATE(true);
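
    // For large flushes, lock()/memcpy/unlock() is preferred over
    // updateData(), presumably because it avoids an extra driver-side copy;
    // small flushes fall through to updateData() below.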
    if (fGpu->caps()->bufferLockSupport() &&
        flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
        void* data = buffer->lock();
        if (NULL != data) {
            memcpy(data, fBufferPtr, flushSize);
            buffer->unlock();
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
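    // The boolean argument to the create calls below requests a dynamic
    // buffer (assuming the GrGpu create* signatures of this vintage), which
    // suits the pool's write-once, recycle-often usage.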
    if (kIndex_BufferType == fBufferType) {
        return fGpu->createIndexBuffer(size, true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
        return fGpu->createVertexBuffer(size, true);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 bool frequentResetHint,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
: GrBufferAllocPool(gpu,
                    kVertex_BufferType,
                    frequentResetHint,
                    bufferSize,
                    preallocBufferCnt) {
}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(NULL != buffer);
    SkASSERT(NULL != startVertex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     &geomBuffer,
                                     &offset);
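
    // The base class aligned the allocation to vertexSize, so the byte
    // offset converts exactly to a starting vertex index.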
    *buffer = (const GrVertexBuffer*) geomBuffer;
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
                                             int vertexCount,
                                             const void* vertices,
                                             const GrVertexBuffer** buffer,
                                             int* startVertex) {
    void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
    if (NULL != space) {
        memcpy(space,
               vertices,
               vertexSize * vertexCount);
        return true;
    } else {
        return false;
    }
}

int GrVertexBufferAllocPool::preallocatedBufferVertices(size_t vertexSize) const {
    return static_cast<int>(INHERITED::preallocatedBufferSize() / vertexSize);
}

int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const {
    return currentBufferItems(vertexSize);
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               bool frequentResetHint,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
: GrBufferAllocPool(gpu,
                    kIndex_BufferType,
                    frequentResetHint,
                    bufferSize,
                    preallocBufferCnt) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(NULL != buffer);
    SkASSERT(NULL != startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                           const void* indices,
                                           const GrIndexBuffer** buffer,
                                           int* startIndex) {
    void* space = makeSpace(indexCount, buffer, startIndex);
    if (NULL != space) {
        memcpy(space, indices, sizeof(uint16_t) * indexCount);
        return true;
    } else {
        return false;
    }
}

int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
    return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t));
}

int GrIndexBufferAllocPool::currentBufferIndices() const {
    return currentBufferItems(sizeof(uint16_t));
}
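
For orientation, a minimal sketch of how a caller might drive the vertex
pool (the GrGpu setup is elided and the vertex layout is made up; the
names otherwise follow the code above):

    // Hedged sketch: take space for four vertices, write them, then
    // unlock before drawing and reset between frames.
    GrVertexBufferAllocPool pool(gpu, false /* frequentResetHint */,
                                 0 /* bufferSize */, 4 /* preallocBufferCnt */);
    const GrVertexBuffer* vb = NULL;
    int startVertex = 0;
    size_t vertexSize = 2 * sizeof(float);  // hypothetical position-only layout
    void* verts = pool.makeSpace(vertexSize, 4, &vb, &startVertex);
    if (NULL != verts) {
        memcpy(verts, positions, 4 * vertexSize);  // 'positions' is caller data
        pool.unlock();   // flush CPU-side data before the GPU reads it
        // ... issue a draw referencing vb and startVertex ...
    }
    pool.reset();        // recycle blocks at the end of the frame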
