gfx/layers/ipc/ISurfaceAllocator.cpp

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: sw=2 ts=8 et :
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ISurfaceAllocator.h"
#include <sys/types.h>                  // for int32_t
#include <limits>                       // for std::numeric_limits
#include <new>                          // for std::nothrow
#include <string.h>                     // for memset
#include "gfx2DGlue.h"                  // for IntSize
#include "gfxPlatform.h"                // for gfxPlatform, gfxImageFormat
#include "gfxSharedImageSurface.h"      // for gfxSharedImageSurface
#include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
#include "mozilla/Atomics.h"            // for PrimitiveIntrinsics
#include "mozilla/DebugOnly.h"          // for DebugOnly
#include "mozilla/ipc/SharedMemory.h"   // for SharedMemory, etc
#include "mozilla/layers/LayersSurfaces.h"  // for SurfaceDescriptor, etc
#include "ShadowLayerUtils.h"
#include "mozilla/mozalloc.h"           // for operator delete[], etc
#include "nsAutoPtr.h"                  // for nsRefPtr, getter_AddRefs, etc
#include "nsDebug.h"                    // for NS_RUNTIMEABORT
#include "nsXULAppAPI.h"                // for XRE_GetProcessType, etc
#include "mozilla/ipc/Shmem.h"
#include "mozilla/layers/ImageDataSerializer.h"
#ifdef DEBUG
#include "prenv.h"
#endif

using namespace mozilla::ipc;

namespace mozilla {
namespace layers {

NS_IMPL_ISUPPORTS(GfxMemoryImageReporter, nsIMemoryReporter)

mozilla::Atomic<size_t> GfxMemoryImageReporter::sAmount(0);

mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType()
{
  return mozilla::ipc::SharedMemory::TYPE_BASIC;
}

bool
IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
{
  return aSurface.type() != SurfaceDescriptor::T__None &&
         aSurface.type() != SurfaceDescriptor::Tnull_t;
}

ISurfaceAllocator::~ISurfaceAllocator()
{
  // Check that we are not leaking shmems.
  MOZ_ASSERT(mUsedShmems.empty());
}

void
ISurfaceAllocator::Finalize()
{
  ShrinkShmemSectionHeap();
}

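// Returns the address of the pixel buffer referenced by a Shmem or Memory
// surface descriptor and writes the usable buffer size to aSize. Memory
// descriptors do not track their size, so aSize is set to the maximum value
// in that case.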
static inline uint8_t*
GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor, size_t& aSize)
{
  MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor));
  MOZ_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorShmem ||
             aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorMemory);
  if (aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorShmem) {
    Shmem shmem(aDescriptor.get_SurfaceDescriptorShmem().data());
    aSize = shmem.Size<uint8_t>();
    return shmem.get<uint8_t>();
  } else {
    const SurfaceDescriptorMemory& image = aDescriptor.get_SurfaceDescriptorMemory();
    aSize = std::numeric_limits<size_t>::max();
    return reinterpret_cast<uint8_t*>(image.data());
  }
}

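// Creates a DrawTarget (or DataSourceSurface) for the image buffer referenced
// by aDescriptor. The buffer must have been initialized with
// ImageDataSerializer::InitializeBufferInfo, as done in
// AllocSurfaceDescriptorWithCaps below.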
TemporaryRef<gfx::DrawTarget>
GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend)
{
  size_t size;
  uint8_t* data = GetAddressFromDescriptor(aDescriptor, size);
  ImageDataDeserializer image(data, size);
  return image.GetAsDrawTarget(aBackend);
}

TemporaryRef<gfx::DataSourceSurface>
GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor)
{
  size_t size;
  uint8_t* data = GetAddressFromDescriptor(aDescriptor, size);
  ImageDataDeserializer image(data, size);
  return image.GetAsSurface();
}

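// Allocates an image buffer of the given size and content type, either in
// process-local memory or in shared memory depending on
// gfxPlatform::PreferMemoryOverShmem(), and writes the image header via
// ImageDataSerializer so the buffer can later be wrapped by
// ImageDataDeserializer.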
bool
ISurfaceAllocator::AllocSurfaceDescriptor(const gfx::IntSize& aSize,
                                          gfxContentType aContent,
                                          SurfaceDescriptor* aBuffer)
{
  return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer);
}

bool
ISurfaceAllocator::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
                                                  gfxContentType aContent,
                                                  uint32_t aCaps,
                                                  SurfaceDescriptor* aBuffer)
{
  gfx::SurfaceFormat format =
    gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent);
  size_t size = ImageDataSerializer::ComputeMinBufferSize(aSize, format);
  if (gfxPlatform::GetPlatform()->PreferMemoryOverShmem()) {
    uint8_t* data = new (std::nothrow) uint8_t[size];
    if (!data) {
      return false;
    }
    GfxMemoryImageReporter::DidAlloc(data);
#ifdef XP_MACOSX
    // Work around a bug in Quartz where drawing an a8 surface to another a8
    // surface with OPERATOR_SOURCE still requires the destination to be clear.
    if (format == gfx::SurfaceFormat::A8) {
      memset(data, 0, size);
    }
#endif
    *aBuffer = SurfaceDescriptorMemory((uintptr_t)data, format);
  } else {

    mozilla::ipc::SharedMemory::SharedMemoryType shmemType = OptimalShmemType();
    mozilla::ipc::Shmem shmem;
    if (!AllocUnsafeShmem(size, shmemType, &shmem)) {
      return false;
    }

    *aBuffer = SurfaceDescriptorShmem(shmem, format);
  }

  uint8_t* data = GetAddressFromDescriptor(*aBuffer, size);
  ImageDataSerializer serializer(data, size);
  serializer.InitializeBufferInfo(aSize, format);
  return true;
}

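// Illustrative usage sketch (not part of the original file): a typical caller
// pairs AllocSurfaceDescriptor with DestroySharedSurface and draws through
// GetDrawTargetForDescriptor. 'allocator' stands for any concrete
// ISurfaceAllocator implementation and is hypothetical here.
//
//   SurfaceDescriptor desc;
//   if (allocator->AllocSurfaceDescriptor(gfx::IntSize(256, 256),
//                                         gfxContentType::COLOR_ALPHA,
//                                         &desc)) {
//     RefPtr<gfx::DrawTarget> dt =
//       GetDrawTargetForDescriptor(desc, gfx::BackendType::CAIRO);
//     // ... draw into dt ...
//     allocator->DestroySharedSurface(&desc);
//   }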
/* static */ bool
ISurfaceAllocator::IsShmem(SurfaceDescriptor* aSurface)
{
  return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorShmem);
}

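// Releases the buffer referenced by aSurface, whether it was allocated as a
// Shmem or as process-local memory, and resets the descriptor to the empty
// state. Does nothing if the IPC channel is already closed.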
void
ISurfaceAllocator::DestroySharedSurface(SurfaceDescriptor* aSurface)
{
  MOZ_ASSERT(aSurface);
  if (!aSurface) {
    return;
  }
  if (!IPCOpen()) {
    return;
  }
  switch (aSurface->type()) {
    case SurfaceDescriptor::TSurfaceDescriptorShmem:
      DeallocShmem(aSurface->get_SurfaceDescriptorShmem().data());
      break;
    case SurfaceDescriptor::TSurfaceDescriptorMemory:
      GfxMemoryImageReporter::WillFree((uint8_t*)aSurface->get_SurfaceDescriptorMemory().data());
      delete [] (uint8_t*)aSurface->get_SurfaceDescriptorMemory().data();
      break;
    case SurfaceDescriptor::Tnull_t:
    case SurfaceDescriptor::T__None:
      break;
    default:
      NS_RUNTIMEABORT("surface type not implemented!");
  }
  *aSurface = SurfaceDescriptor();
}

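// The shmem section allocator below hands out small fixed-size blocks from
// 4 KiB shmem pages. Each page is laid out as a ShmemSectionHeapHeader
// followed by a sequence of allocations, where every allocation is a
// ShmemSectionHeapAllocation header immediately followed by its payload.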
// XXX - We should actually figure out the minimum shmem allocation size for
// the current platform and use that instead of hard-coding 4096 here.
const uint32_t sShmemPageSize = 4096;
const uint32_t sSupportedBlockSize = 4;

enum AllocationStatus
{
  STATUS_ALLOCATED,
  STATUS_FREED
};

struct ShmemSectionHeapHeader
{
  Atomic<uint32_t> mTotalBlocks;
  Atomic<uint32_t> mAllocatedBlocks;
};

struct ShmemSectionHeapAllocation
{
  Atomic<uint32_t> mStatus;
  uint32_t mSize;
};

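// Allocates a sSupportedBlockSize-byte section: it reuses a freed block in an
// existing page when one is available, otherwise appends a new block,
// allocating a fresh shmem page if no page has room.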
bool
ISurfaceAllocator::AllocShmemSection(size_t aSize, mozilla::layers::ShmemSection* aShmemSection)
{
  // For now we only support sizes of 4. If we want to support different sizes
  // some more complicated bookkeeping should be added.
  MOZ_ASSERT(aSize == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection);

  uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));

  for (size_t i = 0; i < mUsedShmems.size(); i++) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
      aShmemSection->shmem() = mUsedShmems[i];
      MOZ_ASSERT(mUsedShmems[i].IsWritable());
      break;
    }
  }

  if (!aShmemSection->shmem().IsWritable()) {
    ipc::Shmem tmp;
    if (!AllocUnsafeShmem(sShmemPageSize, ipc::SharedMemory::TYPE_BASIC, &tmp)) {
      return false;
    }

    ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
    header->mTotalBlocks = 0;
    header->mAllocatedBlocks = 0;

    mUsedShmems.push_back(tmp);
    aShmemSection->shmem() = tmp;
  }

  MOZ_ASSERT(aShmemSection->shmem().IsWritable());

  ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
  uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);

  ShmemSectionHeapAllocation* allocHeader = nullptr;

  if (header->mTotalBlocks > header->mAllocatedBlocks) {
    // Search for the first available block.
    for (size_t i = 0; i < header->mTotalBlocks; i++) {
      allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

      if (allocHeader->mStatus == STATUS_FREED) {
        break;
      }
      heap += allocationSize;
    }
    MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
    MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
  } else {
    heap += header->mTotalBlocks * allocationSize;

    header->mTotalBlocks++;
    allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
    allocHeader->mSize = aSize;
  }

  MOZ_ASSERT(allocHeader);
  header->mAllocatedBlocks++;
  allocHeader->mStatus = STATUS_ALLOCATED;

  aShmemSection->size() = aSize;
  aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
  ShrinkShmemSectionHeap();
  return true;
}

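// Marks the block backing aShmemSection as freed; the containing page is
// released by ShrinkShmemSectionHeap once it no longer holds any allocated
// blocks.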
void
ISurfaceAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection)
{
  MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);

  ShmemSectionHeapAllocation* allocHeader =
    reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() +
                                                  aShmemSection.offset() -
                                                  sizeof(ShmemSectionHeapAllocation));

  MOZ_ASSERT(allocHeader->mSize == aShmemSection.size());

  DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
  // If this fails something really weird is going on.
  MOZ_ASSERT(success);

  ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>();
  header->mAllocatedBlocks--;

  ShrinkShmemSectionHeap();
}

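// Releases at most one shmem page per call: the first page whose blocks have
// all been freed is deallocated and swapped with the last entry in
// mUsedShmems before being dropped.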
void
ISurfaceAllocator::ShrinkShmemSectionHeap()
{
  for (size_t i = 0; i < mUsedShmems.size(); i++) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if (header->mAllocatedBlocks == 0) {
      DeallocShmem(mUsedShmems[i]);

      // We don't particularly care about order; move the last one in the array
      // to this position.
      mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
      mUsedShmems.pop_back();
      i--;
      break;
    }
  }
}

} // namespace layers
} // namespace mozilla
