--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/gfx/layers/ipc/ISurfaceAllocator.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,312 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=8 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ISurfaceAllocator.h"
+#include <sys/types.h>                  // for int32_t
+#include "gfx2DGlue.h"                  // for IntSize
+#include "gfxPlatform.h"                // for gfxPlatform, gfxImageFormat
+#include "gfxSharedImageSurface.h"      // for gfxSharedImageSurface
+#include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
+#include "mozilla/Atomics.h"            // for PrimitiveIntrinsics
+#include "mozilla/ipc/SharedMemory.h"   // for SharedMemory, etc
+#include "mozilla/layers/LayersSurfaces.h"  // for SurfaceDescriptor, etc
+#include "ShadowLayerUtils.h"
+#include "mozilla/mozalloc.h"           // for operator delete[], etc
+#include "nsAutoPtr.h"                  // for nsRefPtr, getter_AddRefs, etc
+#include "nsDebug.h"                    // for NS_RUNTIMEABORT
+#include "nsXULAppAPI.h"                // for XRE_GetProcessType, etc
+#include "mozilla/ipc/Shmem.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+#ifdef DEBUG
+#include "prenv.h"
+#endif
+
+using namespace mozilla::ipc;
+
+namespace mozilla {
+namespace layers {
+
+NS_IMPL_ISUPPORTS(GfxMemoryImageReporter, nsIMemoryReporter)
+
+mozilla::Atomic<size_t> GfxMemoryImageReporter::sAmount(0);
+
+mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType()
+{
+  return mozilla::ipc::SharedMemory::TYPE_BASIC;
+}
+
+bool
+IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
+{
+  return aSurface.type() != SurfaceDescriptor::T__None &&
+         aSurface.type() != SurfaceDescriptor::Tnull_t;
+}
+
+ISurfaceAllocator::~ISurfaceAllocator()
+{
+  // Check if we're not leaking..
+  MOZ_ASSERT(mUsedShmems.empty());
+}
+
+void
+ISurfaceAllocator::Finalize()
+{
+  ShrinkShmemSectionHeap();
+}
+
+static inline uint8_t*
+GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor, size_t& aSize)
+{
+  MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor));
+  MOZ_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorShmem ||
+             aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorMemory);
+  if (aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorShmem) {
+    Shmem shmem(aDescriptor.get_SurfaceDescriptorShmem().data());
+    aSize = shmem.Size<uint8_t>();
+    return shmem.get<uint8_t>();
+  } else {
+    const SurfaceDescriptorMemory& image = aDescriptor.get_SurfaceDescriptorMemory();
+    aSize = std::numeric_limits<size_t>::max();
+    return reinterpret_cast<uint8_t*>(image.data());
+  }
+}
+
+TemporaryRef<gfx::DrawTarget>
+GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend)
+{
+  size_t size;
+  uint8_t* data = GetAddressFromDescriptor(aDescriptor, size);
+  ImageDataDeserializer image(data, size);
+  return image.GetAsDrawTarget(aBackend);
+}
+
+TemporaryRef<gfx::DataSourceSurface>
+GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor)
+{
+  size_t size;
+  uint8_t* data = GetAddressFromDescriptor(aDescriptor, size);
+  ImageDataDeserializer image(data, size);
+  return image.GetAsSurface();
+}
+
+bool
+ISurfaceAllocator::AllocSurfaceDescriptor(const gfx::IntSize& aSize,
+                                          gfxContentType aContent,
+                                          SurfaceDescriptor* aBuffer)
+{
+  return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer);
+}
+
+bool
+ISurfaceAllocator::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
+                                                  gfxContentType aContent,
+                                                  uint32_t aCaps,
+                                                  SurfaceDescriptor* aBuffer)
+{
+  gfx::SurfaceFormat format =
+    gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent);
+  size_t size = ImageDataSerializer::ComputeMinBufferSize(aSize, format);
+  if (gfxPlatform::GetPlatform()->PreferMemoryOverShmem()) {
+    uint8_t *data = new (std::nothrow) uint8_t[size];
+    if (!data) {
+      return false;
+    }
+    GfxMemoryImageReporter::DidAlloc(data);
+#ifdef XP_MACOSX
+    // Workaround a bug in Quartz where drawing an a8 surface to another a8
+    // surface with OPERATOR_SOURCE still requires the destination to be clear.
+    if (format == gfx::SurfaceFormat::A8) {
+      memset(data, 0, size);
+    }
+#endif
+    *aBuffer = SurfaceDescriptorMemory((uintptr_t)data, format);
+  } else {
+
+    mozilla::ipc::SharedMemory::SharedMemoryType shmemType = OptimalShmemType();
+    mozilla::ipc::Shmem shmem;
+    if (!AllocUnsafeShmem(size, shmemType, &shmem)) {
+      return false;
+    }
+
+    *aBuffer = SurfaceDescriptorShmem(shmem, format);
+  }
+
+  uint8_t* data = GetAddressFromDescriptor(*aBuffer, size);
+  ImageDataSerializer serializer(data, size);
+  serializer.InitializeBufferInfo(aSize, format);
+  return true;
+}
+
+/* static */ bool
+ISurfaceAllocator::IsShmem(SurfaceDescriptor* aSurface)
+{
+  return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorShmem);
+}
+
+void
+ISurfaceAllocator::DestroySharedSurface(SurfaceDescriptor* aSurface)
+{
+  MOZ_ASSERT(aSurface);
+  if (!aSurface) {
+    return;
+  }
+  if (!IPCOpen()) {
+    return;
+  }
+  switch (aSurface->type()) {
+    case SurfaceDescriptor::TSurfaceDescriptorShmem:
+      DeallocShmem(aSurface->get_SurfaceDescriptorShmem().data());
+      break;
+    case SurfaceDescriptor::TSurfaceDescriptorMemory:
+      GfxMemoryImageReporter::WillFree((uint8_t*)aSurface->get_SurfaceDescriptorMemory().data());
+      delete [] (uint8_t*)aSurface->get_SurfaceDescriptorMemory().data();
+      break;
+    case SurfaceDescriptor::Tnull_t:
+    case SurfaceDescriptor::T__None:
+      break;
+    default:
+      NS_RUNTIMEABORT("surface type not implemented!");
+  }
+  *aSurface = SurfaceDescriptor();
+}
+
+// XXX - We should actually figure out the minimum shmem allocation size on
+// a certain platform and use that.
+const uint32_t sShmemPageSize = 4096;
+const uint32_t sSupportedBlockSize = 4;
+
+enum AllocationStatus
+{
+  STATUS_ALLOCATED,
+  STATUS_FREED
+};
+
+struct ShmemSectionHeapHeader
+{
+  Atomic<uint32_t> mTotalBlocks;
+  Atomic<uint32_t> mAllocatedBlocks;
+};
+
+struct ShmemSectionHeapAllocation
+{
+  Atomic<uint32_t> mStatus;
+  uint32_t mSize;
+};
+
+bool
+ISurfaceAllocator::AllocShmemSection(size_t aSize, mozilla::layers::ShmemSection* aShmemSection)
+{
+  // For now we only support sizes of 4. If we want to support different sizes
+  // some more complicated bookkeeping should be added.
+  MOZ_ASSERT(aSize == sSupportedBlockSize);
+  MOZ_ASSERT(aShmemSection);
+
+  uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));
+
+  for (size_t i = 0; i < mUsedShmems.size(); i++) {
+    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
+    if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
+      aShmemSection->shmem() = mUsedShmems[i];
+      MOZ_ASSERT(mUsedShmems[i].IsWritable());
+      break;
+    }
+  }
+
+  if (!aShmemSection->shmem().IsWritable()) {
+    ipc::Shmem tmp;
+    if (!AllocUnsafeShmem(sShmemPageSize, ipc::SharedMemory::TYPE_BASIC, &tmp)) {
+      return false;
+    }
+
+    ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
+    header->mTotalBlocks = 0;
+    header->mAllocatedBlocks = 0;
+
+    mUsedShmems.push_back(tmp);
+    aShmemSection->shmem() = tmp;
+  }
+
+  MOZ_ASSERT(aShmemSection->shmem().IsWritable());
+
+  ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
+  uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);
+
+  ShmemSectionHeapAllocation* allocHeader = nullptr;
+
+  if (header->mTotalBlocks > header->mAllocatedBlocks) {
+    // Search for the first available block.
+    for (size_t i = 0; i < header->mTotalBlocks; i++) {
+      allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
+
+      if (allocHeader->mStatus == STATUS_FREED) {
+        break;
+      }
+      heap += allocationSize;
+    }
+    MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
+    MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
+  } else {
+    heap += header->mTotalBlocks * allocationSize;
+
+    header->mTotalBlocks++;
+    allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
+    allocHeader->mSize = aSize;
+  }
+
+  MOZ_ASSERT(allocHeader);
+  header->mAllocatedBlocks++;
+  allocHeader->mStatus = STATUS_ALLOCATED;
+
+  aShmemSection->size() = aSize;
+  aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
+  ShrinkShmemSectionHeap();
+  return true;
+}
+
+void
+ISurfaceAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection)
+{
+  MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
+  MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);
+
+  ShmemSectionHeapAllocation* allocHeader =
+    reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() +
+                                                  aShmemSection.offset() -
+                                                  sizeof(ShmemSectionHeapAllocation));
+
+  MOZ_ASSERT(allocHeader->mSize == aShmemSection.size());
+
+  DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
+  // If this fails something really weird is going on.
+  MOZ_ASSERT(success);
+
+  ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>();
+  header->mAllocatedBlocks--;
+
+  ShrinkShmemSectionHeap();
+}
+
+void
+ISurfaceAllocator::ShrinkShmemSectionHeap()
+{
+  for (size_t i = 0; i < mUsedShmems.size(); i++) {
+    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
+    if (header->mAllocatedBlocks == 0) {
+      DeallocShmem(mUsedShmems[i]);
+
+      // We don't particularly care about order, move the last one in the array
+      // to this position.
+      mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
+      mUsedShmems.pop_back();
+      i--;
+      break;
+    }
+  }
+}
+
+} // namespace
+} // namespace
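
For reference, the sketch below (not part of the patch) shows how a caller might exercise the descriptor API added above. The function name PaintIntoSharedBuffer, the gfxContentType::COLOR_ALPHA and BackendType::CAIRO choices, and the assumption that aAllocator is some concrete ISurfaceAllocator (for example a ShadowLayerForwarder) with an open IPC channel are illustrative assumptions, not something the patch itself contains.

#include "ISurfaceAllocator.h"   // declarations added by this patch
#include "mozilla/gfx/2D.h"      // DrawTarget, Rect, BackendType

// Hypothetical caller, for illustration only.
static bool
PaintIntoSharedBuffer(mozilla::layers::ISurfaceAllocator* aAllocator,
                      const mozilla::gfx::IntSize& aSize)
{
  using namespace mozilla::layers;
  using namespace mozilla::gfx;

  SurfaceDescriptor descriptor;
  // Allocates either heap memory or unsafe shmem, depending on
  // PreferMemoryOverShmem(), and writes the ImageDataSerializer header
  // into the buffer.
  if (!aAllocator->AllocSurfaceDescriptor(aSize, gfxContentType::COLOR_ALPHA,
                                          &descriptor)) {
    return false;
  }

  // View the shared buffer as a DrawTarget and paint into it.
  mozilla::RefPtr<DrawTarget> dt =
    GetDrawTargetForDescriptor(descriptor, BackendType::CAIRO);
  if (dt) {
    dt->ClearRect(Rect(0, 0, aSize.width, aSize.height));
  }

  // The descriptor would normally be forwarded over IPC here; once it is no
  // longer needed on this side, release the shmem or heap buffer behind it.
  aAllocator->DestroySharedSurface(&descriptor);
  return true;
}

GetSurfaceForDescriptor() can be driven the same way when a DataSourceSurface is wanted instead of a DrawTarget.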
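
Likewise, a minimal sketch of the ShmemSection mini-heap lifecycle, assuming the same includes plus "mozilla/layers/LayersSurfaces.h" for the ShmemSection IPDL struct and the same hypothetical aAllocator as above. ShareSmallValue and its payload are made up for illustration; 4 bytes is the only block size AllocShmemSection currently supports.

// Hypothetical caller, for illustration only: shares a single 4-byte value
// through the ShmemSection heap implemented above.
static bool
ShareSmallValue(mozilla::layers::ISurfaceAllocator* aAllocator, uint32_t aValue)
{
  using namespace mozilla::layers;

  ShmemSection section;
  // Reuses an existing 4096-byte shmem page if one still has room, otherwise
  // allocates a new page, then carves a block out of it.
  if (!aAllocator->AllocShmemSection(4, &section)) {
    return false;
  }

  // offset() points at the payload, just past the ShmemSectionHeapAllocation
  // header that AllocShmemSection placed in front of the block.
  uint32_t* payload = reinterpret_cast<uint32_t*>(
    section.shmem().get<uint8_t>() + section.offset());
  *payload = aValue;

  // ... the section would normally be handed to the other side of the
  // compositor IPC channel here ...

  // Marks the block STATUS_FREED; once every block in the page has been
  // freed, ShrinkShmemSectionHeap() deallocates the whole shmem page.
  aAllocator->FreeShmemSection(section);
  return true;
}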