Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate.
This solves Tor bug #9701, complying with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
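
As a minimal sketch of what "conditionally force memory storage according to a pref" can look like in Gecko (illustrative only: the helper name and the boolean read with a false fallback are assumptions, not part of the actual patch; whether the pref is boolean or an integer mode is not shown here), the decision would hinge on a single pref check, analogous to the gfxPlatform::PreferMemoryOverShmem() branch in the file below. Preferences::GetBool is the standard Gecko pref accessor.

    #include "mozilla/Preferences.h"

    // Hypothetical helper (not from the patch): true when third-party
    // isolation should force memory-only storage.
    static bool ShouldForceMemoryStorage()
    {
      // Defaulting to false when the pref is unset is an assumption.
      return mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }
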
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: sw=2 ts=8 et :
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ISurfaceAllocator.h"
#include <sys/types.h>                  // for int32_t
#include <limits>                       // for std::numeric_limits
#include <new>                          // for std::nothrow
#include "gfx2DGlue.h"                  // for IntSize
#include "gfxPlatform.h"                // for gfxPlatform, gfxImageFormat
#include "gfxSharedImageSurface.h"      // for gfxSharedImageSurface
#include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
#include "mozilla/Atomics.h"            // for PrimitiveIntrinsics
#include "mozilla/DebugOnly.h"          // for DebugOnly
#include "mozilla/ipc/SharedMemory.h"   // for SharedMemory, etc
#include "mozilla/layers/LayersSurfaces.h"  // for SurfaceDescriptor, etc
#include "ShadowLayerUtils.h"
#include "mozilla/mozalloc.h"           // for operator delete[], etc
#include "nsAutoPtr.h"                  // for nsRefPtr, getter_AddRefs, etc
#include "nsDebug.h"                    // for NS_RUNTIMEABORT
#include "nsXULAppAPI.h"                // for XRE_GetProcessType, etc
#include "mozilla/ipc/Shmem.h"
#include "mozilla/layers/ImageDataSerializer.h"
#ifdef DEBUG
#include "prenv.h"
#endif

using namespace mozilla::ipc;

namespace mozilla {
namespace layers {

NS_IMPL_ISUPPORTS(GfxMemoryImageReporter, nsIMemoryReporter)

mozilla::Atomic<size_t> GfxMemoryImageReporter::sAmount(0);

mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType()
{
  return mozilla::ipc::SharedMemory::TYPE_BASIC;
}

bool
IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
{
  return aSurface.type() != SurfaceDescriptor::T__None &&
         aSurface.type() != SurfaceDescriptor::Tnull_t;
}

ISurfaceAllocator::~ISurfaceAllocator()
{
  // Check that we're not leaking.
  MOZ_ASSERT(mUsedShmems.empty());
}

void
ISurfaceAllocator::Finalize()
{
  ShrinkShmemSectionHeap();
}

static inline uint8_t*
GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor, size_t& aSize)
{
  MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor));
  MOZ_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorShmem ||
             aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorMemory);
  if (aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorShmem) {
    Shmem shmem(aDescriptor.get_SurfaceDescriptorShmem().data());
    aSize = shmem.Size<uint8_t>();
    return shmem.get<uint8_t>();
  } else {
    const SurfaceDescriptorMemory& image = aDescriptor.get_SurfaceDescriptorMemory();
    // The size of a heap allocation can't be recovered from the descriptor,
    // so report the maximum.
    aSize = std::numeric_limits<size_t>::max();
    return reinterpret_cast<uint8_t*>(image.data());
  }
}

TemporaryRef<gfx::DrawTarget>
GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend)
{
  size_t size;
  uint8_t* data = GetAddressFromDescriptor(aDescriptor, size);
  ImageDataDeserializer image(data, size);
  return image.GetAsDrawTarget(aBackend);
}

TemporaryRef<gfx::DataSourceSurface>
GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor)
{
  size_t size;
  uint8_t* data = GetAddressFromDescriptor(aDescriptor, size);
  ImageDataDeserializer image(data, size);
  return image.GetAsSurface();
}

bool
ISurfaceAllocator::AllocSurfaceDescriptor(const gfx::IntSize& aSize,
                                          gfxContentType aContent,
                                          SurfaceDescriptor* aBuffer)
{
  return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer);
}

bool
ISurfaceAllocator::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
                                                  gfxContentType aContent,
                                                  uint32_t aCaps,
                                                  SurfaceDescriptor* aBuffer)
{
  gfx::SurfaceFormat format =
    gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent);
  size_t size = ImageDataSerializer::ComputeMinBufferSize(aSize, format);
  if (gfxPlatform::GetPlatform()->PreferMemoryOverShmem()) {
    uint8_t* data = new (std::nothrow) uint8_t[size];
    if (!data) {
      return false;
    }
    GfxMemoryImageReporter::DidAlloc(data);
#ifdef XP_MACOSX
    // Work around a Quartz bug where drawing an A8 surface onto another A8
    // surface with OPERATOR_SOURCE still requires the destination to be
    // cleared first.
    if (format == gfx::SurfaceFormat::A8) {
      memset(data, 0, size);
    }
#endif
    *aBuffer = SurfaceDescriptorMemory((uintptr_t)data, format);
  } else {
    mozilla::ipc::SharedMemory::SharedMemoryType shmemType = OptimalShmemType();
    mozilla::ipc::Shmem shmem;
    if (!AllocUnsafeShmem(size, shmemType, &shmem)) {
      return false;
    }

    *aBuffer = SurfaceDescriptorShmem(shmem, format);
  }

  uint8_t* data = GetAddressFromDescriptor(*aBuffer, size);
  ImageDataSerializer serializer(data, size);
  serializer.InitializeBufferInfo(aSize, format);
  return true;
}

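// Usage sketch (illustrative only; the allocator, backend, and variable
// names are hypothetical, not taken from this file): a typical caller pairs
// allocation with DestroySharedSurface() below.
//
//   SurfaceDescriptor descriptor;
//   if (allocator->AllocSurfaceDescriptor(size, gfxContentType::COLOR_ALPHA,
//                                         &descriptor)) {
//     RefPtr<gfx::DrawTarget> dt = GetDrawTargetForDescriptor(descriptor, backend);
//     // ... draw through dt ...
//     allocator->DestroySharedSurface(&descriptor);
//   }
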
/* static */ bool
ISurfaceAllocator::IsShmem(SurfaceDescriptor* aSurface)
{
  return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorShmem);
}

void
ISurfaceAllocator::DestroySharedSurface(SurfaceDescriptor* aSurface)
{
  MOZ_ASSERT(aSurface);
  if (!aSurface) {
    return;
  }
  if (!IPCOpen()) {
    return;
  }
  switch (aSurface->type()) {
    case SurfaceDescriptor::TSurfaceDescriptorShmem:
      DeallocShmem(aSurface->get_SurfaceDescriptorShmem().data());
      break;
    case SurfaceDescriptor::TSurfaceDescriptorMemory:
      GfxMemoryImageReporter::WillFree((uint8_t*)aSurface->get_SurfaceDescriptorMemory().data());
      delete [] (uint8_t*)aSurface->get_SurfaceDescriptorMemory().data();
      break;
    case SurfaceDescriptor::Tnull_t:
    case SurfaceDescriptor::T__None:
      break;
    default:
      NS_RUNTIMEABORT("surface type not implemented!");
  }
  *aSurface = SurfaceDescriptor();
}

// XXX - We should actually figure out the minimum shmem allocation size on
// each platform and use that.
const uint32_t sShmemPageSize = 4096;
const uint32_t sSupportedBlockSize = 4;

enum AllocationStatus
{
  STATUS_ALLOCATED,
  STATUS_FREED
};

struct ShmemSectionHeapHeader
{
  Atomic<uint32_t> mTotalBlocks;
  Atomic<uint32_t> mAllocatedBlocks;
};

struct ShmemSectionHeapAllocation
{
  Atomic<uint32_t> mStatus;
  uint32_t mSize;
};

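// Layout of one shmem-section page (sShmemPageSize bytes), as maintained by
// the bookkeeping in AllocShmemSection() below:
//
//   [ShmemSectionHeapHeader]
//   [ShmemSectionHeapAllocation][sSupportedBlockSize-byte payload]
//   [ShmemSectionHeapAllocation][sSupportedBlockSize-byte payload]
//   ...
//
// mTotalBlocks counts the slots ever carved out of the page;
// mAllocatedBlocks counts the slots currently in STATUS_ALLOCATED.
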
bool
ISurfaceAllocator::AllocShmemSection(size_t aSize, mozilla::layers::ShmemSection* aShmemSection)
{
  // For now we only support allocations of sSupportedBlockSize (4) bytes.
  // Supporting other sizes would require more elaborate bookkeeping.
  MOZ_ASSERT(aSize == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection);

  uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));

  for (size_t i = 0; i < mUsedShmems.size(); i++) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
      aShmemSection->shmem() = mUsedShmems[i];
      MOZ_ASSERT(mUsedShmems[i].IsWritable());
      break;
    }
  }

  if (!aShmemSection->shmem().IsWritable()) {
    ipc::Shmem tmp;
    if (!AllocUnsafeShmem(sShmemPageSize, ipc::SharedMemory::TYPE_BASIC, &tmp)) {
      return false;
    }

    ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
    header->mTotalBlocks = 0;
    header->mAllocatedBlocks = 0;

    mUsedShmems.push_back(tmp);
    aShmemSection->shmem() = tmp;
  }

  MOZ_ASSERT(aShmemSection->shmem().IsWritable());

  ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
  uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);

  ShmemSectionHeapAllocation* allocHeader = nullptr;

  if (header->mTotalBlocks > header->mAllocatedBlocks) {
    // Search for the first freed block.
    for (size_t i = 0; i < header->mTotalBlocks; i++) {
      allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

      if (allocHeader->mStatus == STATUS_FREED) {
        break;
      }
      heap += allocationSize;
    }
    MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
    MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
  } else {
    // No freed blocks; carve a new slot off the end of the heap.
    heap += header->mTotalBlocks * allocationSize;

    header->mTotalBlocks++;
    allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
    allocHeader->mSize = aSize;
  }

  MOZ_ASSERT(allocHeader);
  header->mAllocatedBlocks++;
  allocHeader->mStatus = STATUS_ALLOCATED;

  aShmemSection->size() = aSize;
  aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
  ShrinkShmemSectionHeap();
  return true;
}

void
ISurfaceAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection)
{
  MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);

  ShmemSectionHeapAllocation* allocHeader =
    reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() +
                                                  aShmemSection.offset() -
                                                  sizeof(ShmemSectionHeapAllocation));

  MOZ_ASSERT(allocHeader->mSize == aShmemSection.size());

  DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
  // If this fails, something really weird is going on.
  MOZ_ASSERT(success);

  ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>();
  header->mAllocatedBlocks--;

  ShrinkShmemSectionHeap();
}

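// Usage sketch (illustrative only; the allocator and variable names are
// hypothetical): allocate a 4-byte section, use it, then release it.
//
//   ShmemSection section;
//   if (allocator->AllocShmemSection(sSupportedBlockSize, &section)) {
//     uint8_t* block = section.shmem().get<uint8_t>() + section.offset();
//     // ... use the 4-byte block ...
//     allocator->FreeShmemSection(section);
//   }
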
void
ISurfaceAllocator::ShrinkShmemSectionHeap()
{
  for (size_t i = 0; i < mUsedShmems.size(); i++) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if (header->mAllocatedBlocks == 0) {
      DeallocShmem(mUsedShmems[i]);

      // We don't particularly care about order; move the last entry in the
      // array into this slot and revisit the slot on the next iteration.
      mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
      mUsedShmems.pop_back();
      i--;
    }
  }
}

} // namespace layers
} // namespace mozilla