|
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
|
2 * vim: set ts=8 sts=4 et sw=4 tw=99: |
|
3 * This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #ifndef ds_LifoAlloc_h |
|
8 #define ds_LifoAlloc_h |
|
9 |
|
10 #include "mozilla/DebugOnly.h" |
|
11 #include "mozilla/MathAlgorithms.h" |
|
12 #include "mozilla/MemoryChecking.h" |
|
13 #include "mozilla/MemoryReporting.h" |
|
14 #include "mozilla/PodOperations.h" |
|
15 #include "mozilla/TemplateLib.h" |
|
16 #include "mozilla/TypeTraits.h" |
|
17 |
|
18 // This data structure supports stacky LIFO allocation (mark/release and |
|
19 // LifoAllocScope). It does not maintain one contiguous segment; instead, it |
|
20 // maintains a bunch of linked memory segments. In order to prevent malloc/free |
|
21 // thrashing, unused segments are deallocated when garbage collection occurs. |
|
22 |
|
23 #include "jsutil.h" |
|
24 |
|
25 namespace js { |
|
26 |
|
27 namespace detail { |
|
28 |
|
29 static const size_t LIFO_ALLOC_ALIGN = 8; |
|
30 |
|
31 MOZ_ALWAYS_INLINE |
|
32 char * |
|
33 AlignPtr(void *orig) |
|
34 { |
|
35 static_assert(mozilla::tl::FloorLog2<LIFO_ALLOC_ALIGN>::value == |
|
36 mozilla::tl::CeilingLog2<LIFO_ALLOC_ALIGN>::value, |
|
37 "LIFO_ALLOC_ALIGN must be a power of two"); |
|
38 |
|
39 char *result = (char *) ((uintptr_t(orig) + (LIFO_ALLOC_ALIGN - 1)) & (~LIFO_ALLOC_ALIGN + 1)); |
|
40 JS_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0); |
|
41 return result; |
|
42 } |
|
43 |
|
// Header for a chunk of memory wrangled by the LifoAlloc.
//
// Each chunk is one contiguous region: this header, immediately followed by
// |bumpSpaceSize| bytes of data ending at |limit|. Allocation advances
// |bump| toward |limit|; release moves it back.
class BumpChunk
{
    char *bump;           // start of the available data
    char *limit;          // end of the data
    BumpChunk *next_;     // the next BumpChunk
    size_t bumpSpaceSize; // size of the data area

    char *headerBase() { return reinterpret_cast<char *>(this); }
    // Start of the data area, derived from |limit| and |bumpSpaceSize|.
    char *bumpBase() const { return limit - bumpSpaceSize; }

    // Constructed in place (see new_()); the data area begins directly
    // after the header, which must already be LIFO_ALLOC_ALIGN-aligned.
    explicit BumpChunk(size_t bumpSpaceSize)
      : bump(reinterpret_cast<char *>(MOZ_THIS_IN_INITIALIZER_LIST()) + sizeof(BumpChunk)),
        limit(bump + bumpSpaceSize),
        next_(nullptr), bumpSpaceSize(bumpSpaceSize)
    {
        JS_ASSERT(bump == AlignPtr(bump));
    }

    // Move the bump pointer to |ptr|. In DEBUG builds, clobber freed space
    // with 0xcd; under memory checkers, poison freed space and unpoison
    // newly allocated space.
    void setBump(void *ptr) {
        JS_ASSERT(bumpBase() <= ptr);
        JS_ASSERT(ptr <= limit);
#if defined(DEBUG) || defined(MOZ_HAVE_MEM_CHECKS)
        char* prevBump = bump;
#endif
        bump = static_cast<char *>(ptr);
#ifdef DEBUG
        JS_ASSERT(contains(prevBump));

        // Clobber the now-free space.
        if (prevBump > bump)
            memset(bump, 0xcd, prevBump - bump);
#endif

        // Poison/Unpoison memory that we just free'd/allocated.
#if defined(MOZ_HAVE_MEM_CHECKS)
        if (prevBump > bump)
            MOZ_MAKE_MEM_NOACCESS(bump, prevBump - bump);
        else if (bump > prevBump)
            MOZ_MAKE_MEM_UNDEFINED(prevBump, bump - prevBump);
#endif
    }

  public:
    BumpChunk *next() const { return next_; }
    void setNext(BumpChunk *succ) { next_ = succ; }

    // Number of data bytes currently allocated out of this chunk.
    size_t used() const { return bump - bumpBase(); }

    void *start() const { return bumpBase(); }
    void *end() const { return limit; }

    // Malloc-reported size of this chunk (header included).
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
        return mallocSizeOf(this);
    }

    // Size of this chunk (header plus data area) computed from the layout,
    // without consulting the malloc implementation.
    size_t computedSizeOfIncludingThis() {
        return limit - headerBase();
    }

    // Forget all allocations, moving the bump pointer back to the start of
    // the data area.
    void resetBump() {
        setBump(headerBase() + sizeof(BumpChunk));
    }

    // Opaque position token; pass back to release() to roll back.
    void *mark() const { return bump; }

    // Roll the bump pointer back to a previously obtained mark().
    void release(void *mark) {
        JS_ASSERT(contains(mark));
        JS_ASSERT(mark <= bump);
        setBump(mark);
    }

    // True if |mark| lies within this chunk's data area (inclusive of the
    // one-past-the-end position).
    bool contains(void *mark) const {
        return bumpBase() <= mark && mark <= limit;
    }

    bool canAlloc(size_t n);

    // Bytes remaining in the data area after aligning the bump pointer.
    size_t unused() {
        return limit - AlignPtr(bump);
    }

    // Try to perform an allocation of size |n|, return null if not possible.
    MOZ_ALWAYS_INLINE
    void *tryAlloc(size_t n) {
        char *aligned = AlignPtr(bump);
        char *newBump = aligned + n;

        if (newBump > limit)
            return nullptr;

        // Check for overflow.
        if (MOZ_UNLIKELY(newBump < bump))
            return nullptr;

        JS_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
        setBump(newBump);
        return aligned;
    }

    // Allocate and construct a chunk whose data area holds |chunkSize| minus
    // header overhead; defined out of line.
    static BumpChunk *new_(size_t chunkSize);
    static void delete_(BumpChunk *chunk);
};
|
147 |
|
148 } // namespace detail |
|
149 |
|
// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Note: |latest| is not necessary "last". We leave BumpChunks latent in the
// chain after they've been released to avoid thrashing before a GC.
class LifoAlloc
{
    typedef detail::BumpChunk BumpChunk;

    BumpChunk *first;         // head of the chunk chain
    BumpChunk *latest;        // chunk currently served from (may precede |last|)
    BumpChunk *last;          // tail of the chunk chain
    size_t markCount;         // outstanding mark() count (see mark()/release())
    size_t defaultChunkSize_;
    size_t curSize_;          // current total chunk size (via incrementCurSize)
    size_t peakSize_;         // high-water mark of |curSize_|

    void operator=(const LifoAlloc &) MOZ_DELETE;
    LifoAlloc(const LifoAlloc &) MOZ_DELETE;

    // Return a BumpChunk that can perform an allocation of at least size |n|
    // and add it to the chain appropriately.
    //
    // Side effect: if retval is non-null, |first| and |latest| are initialized
    // appropriately.
    BumpChunk *getOrCreateChunk(size_t n);

    // (Re)initialize all bookkeeping. Note: |peakSize_| is intentionally not
    // reset here (see steal()).
    void reset(size_t defaultChunkSize) {
        JS_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
        first = latest = last = nullptr;
        defaultChunkSize_ = defaultChunkSize;
        markCount = 0;
        curSize_ = 0;
    }

    // Append unused chunks to the end of this LifoAlloc.
    void appendUnused(BumpChunk *start, BumpChunk *end) {
        JS_ASSERT(start && end);
        if (last)
            last->setNext(start);
        else
            first = latest = start;
        last = end;
    }

    // Append used chunks to the end of this LifoAlloc. We act as if all the
    // chunks in |this| are used, even if they're not, so memory may be wasted.
    void appendUsed(BumpChunk *start, BumpChunk *latest, BumpChunk *end) {
        JS_ASSERT(start && latest && end);
        if (last)
            last->setNext(start);
        else
            first = latest = start;
        last = end;
        this->latest = latest;
    }

    // Grow |curSize_|, keeping the peak statistic up to date.
    void incrementCurSize(size_t size) {
        curSize_ += size;
        if (curSize_ > peakSize_)
            peakSize_ = curSize_;
    }
    void decrementCurSize(size_t size) {
        JS_ASSERT(curSize_ >= size);
        curSize_ -= size;
    }

  public:
    // |defaultChunkSize| must be a power of two (asserted in reset()).
    explicit LifoAlloc(size_t defaultChunkSize)
      : peakSize_(0)
    {
        reset(defaultChunkSize);
    }

    // Steal allocated chunks from |other|.
    void steal(LifoAlloc *other) {
        JS_ASSERT(!other->markCount);

        // Copy everything from |other| to |this| except for |peakSize_|, which
        // requires some care.
        size_t oldPeakSize = peakSize_;
        mozilla::PodAssign(this, other);
        peakSize_ = Max(oldPeakSize, curSize_);

        other->reset(defaultChunkSize_);
    }

    // Append all chunks from |other|. They are removed from |other|.
    void transferFrom(LifoAlloc *other);

    // Append unused chunks from |other|. They are removed from |other|.
    void transferUnusedFrom(LifoAlloc *other);

    ~LifoAlloc() { freeAll(); }

    size_t defaultChunkSize() const { return defaultChunkSize_; }

    // Frees all held memory.
    void freeAll();

    static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
    // Free everything, but only when no marks are outstanding and the
    // retained memory has grown past HUGE_ALLOCATION.
    void freeAllIfHugeAndUnused() {
        if (markCount == 0 && curSize_ > HUGE_ALLOCATION)
            freeAll();
    }

    // Allocate |n| bytes from |latest|, creating a new chunk on demand.
    // Returns null on allocation failure (JS_OOM_POSSIBLY_FAIL is a hook
    // for simulated OOM in testing builds).
    MOZ_ALWAYS_INLINE
    void *alloc(size_t n) {
        JS_OOM_POSSIBLY_FAIL();

        void *result;
        if (latest && (result = latest->tryAlloc(n)))
            return result;

        if (!getOrCreateChunk(n))
            return nullptr;

        // Since we just created a large enough chunk, this can't fail.
        result = latest->tryAlloc(n);
        MOZ_ASSERT(result);
        return result;
    }

    // Ensures that enough space exists to satisfy N bytes worth of
    // allocation requests, not necessarily contiguous. Note that this does
    // not guarantee a successful single allocation of N bytes.
    MOZ_ALWAYS_INLINE
    bool ensureUnusedApproximate(size_t n) {
        size_t total = 0;
        for (BumpChunk *chunk = latest; chunk; chunk = chunk->next()) {
            total += chunk->unused();
            if (total >= n)
                return true;
        }
        BumpChunk *latestBefore = latest;
        if (!getOrCreateChunk(n))
            return false;
        // getOrCreateChunk advances |latest| (see its side-effect comment);
        // restore it so allocation resumes from the existing chunks first.
        if (latestBefore)
            latest = latestBefore;
        return true;
    }

    // Allocate an array of |count| Ts; restricted to POD element types
    // because no constructors are run.
    template <typename T>
    T *newArray(size_t count) {
        JS_STATIC_ASSERT(mozilla::IsPod<T>::value);
        return newArrayUninitialized<T>(count);
    }

    // Create an array with uninitialized elements of type |T|.
    // The caller is responsible for initialization.
    template <typename T>
    T *newArrayUninitialized(size_t count) {
        // Reject counts whose byte size would overflow size_t.
        if (count & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
            return nullptr;
        return static_cast<T *>(alloc(sizeof(T) * count));
    }

    // Snapshot of an allocation position, obtained from mark() and consumed
    // by release().
    class Mark {
        BumpChunk *chunk;
        void *markInChunk;
        friend class LifoAlloc;
        Mark(BumpChunk *chunk, void *markInChunk) : chunk(chunk), markInChunk(markInChunk) {}
      public:
        // A default-constructed Mark denotes "before any allocation".
        Mark() : chunk(nullptr), markInChunk(nullptr) {}
    };

    Mark mark() {
        markCount++;
        return latest ? Mark(latest, latest->mark()) : Mark();
    }

    // Roll back all allocations made since |mark| was taken. Chunks stay on
    // the chain for reuse rather than being freed.
    void release(Mark mark) {
        markCount--;
        if (!mark.chunk) {
            latest = first;
            if (latest)
                latest->resetBump();
        } else {
            latest = mark.chunk;
            latest->release(mark.markInChunk);
        }
    }

    // Roll back everything; requires that no marks be outstanding.
    void releaseAll() {
        JS_ASSERT(!markCount);
        latest = first;
        if (latest)
            latest->resetBump();
    }

    // Get the total "used" (occupied bytes) count for the arena chunks.
    size_t used() const {
        size_t accum = 0;
        for (BumpChunk *chunk = first; chunk; chunk = chunk->next()) {
            accum += chunk->used();
            if (chunk == latest)
                break; // chunks past |latest| are latent, not in use
        }
        return accum;
    }

    // Return true if the LifoAlloc does not currently contain any allocations.
    bool isEmpty() const {
        return !latest || !latest->used();
    }

    // Return the number of bytes remaining to allocate in the current chunk.
    // e.g. How many bytes we can allocate before needing a new block.
    size_t availableInCurrentChunk() const {
        if (!latest)
            return 0;
        return latest->unused();
    }

    // Get the total size of the arena chunks (including unused space).
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        size_t n = 0;
        for (BumpChunk *chunk = first; chunk; chunk = chunk->next())
            n += chunk->sizeOfIncludingThis(mallocSizeOf);
        return n;
    }

    // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
    }

    // Get the peak size of the arena chunks (including unused space and
    // bookkeeping space).
    size_t peakSizeOfExcludingThis() const { return peakSize_; }

    // Doesn't perform construction; useful for lazily-initialized POD types.
    template <typename T>
    MOZ_ALWAYS_INLINE
    T *newPod() {
        return static_cast<T *>(alloc(sizeof(T)));
    }

    JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)

    // A mutable enumeration of the allocated data.
    class Enum
    {
        friend class LifoAlloc;
        friend class detail::BumpChunk;

        LifoAlloc *alloc_;  // The LifoAlloc being traversed.
        BumpChunk *chunk_;  // The current chunk.
        char *position_;    // The current position (must be within chunk_).

        // If there is not enough room in the remaining block for |size|,
        // advance to the next block and update the position.
        void ensureSpaceAndAlignment(size_t size) {
            JS_ASSERT(!empty());
            char *aligned = detail::AlignPtr(position_);
            if (aligned + size > chunk_->end()) {
                chunk_ = chunk_->next();
                position_ = static_cast<char *>(chunk_->start());
            } else {
                position_ = aligned;
            }
            JS_ASSERT(uintptr_t(position_) + size <= uintptr_t(chunk_->end()));
        }

      public:
        // Begin enumerating at the start of |alloc|'s first chunk.
        Enum(LifoAlloc &alloc)
          : alloc_(&alloc),
            chunk_(alloc.first),
            position_(static_cast<char *>(alloc.first ? alloc.first->start() : nullptr))
        {}

        // Return true if there are no more bytes to enumerate.
        bool empty() {
            return !chunk_ || (chunk_ == alloc_->latest && position_ >= chunk_->mark());
        }

        // Move the read position forward by the size of one T.
        template <typename T>
        void popFront() {
            popFront(sizeof(T));
        }

        // Move the read position forward by |size| bytes.
        void popFront(size_t size) {
            ensureSpaceAndAlignment(size);
            position_ = position_ + size;
        }

        // Update the bytes at the current position with a new value.
        template <typename T>
        void updateFront(const T &t) {
            ensureSpaceAndAlignment(sizeof(T));
            memmove(position_, &t, sizeof(T));
        }

        // Return a pointer to the item at the current position. This
        // returns a pointer to the inline storage, not a copy.
        template <typename T>
        T *get(size_t size = sizeof(T)) {
            ensureSpaceAndAlignment(size);
            return reinterpret_cast<T *>(position_);
        }

        // Return a Mark at the current position of the Enum.
        Mark mark() {
            alloc_->markCount++;
            return Mark(chunk_, position_);
        }
    };
};
|
459 |
|
460 class LifoAllocScope |
|
461 { |
|
462 LifoAlloc *lifoAlloc; |
|
463 LifoAlloc::Mark mark; |
|
464 bool shouldRelease; |
|
465 MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER |
|
466 |
|
467 public: |
|
468 explicit LifoAllocScope(LifoAlloc *lifoAlloc |
|
469 MOZ_GUARD_OBJECT_NOTIFIER_PARAM) |
|
470 : lifoAlloc(lifoAlloc), |
|
471 mark(lifoAlloc->mark()), |
|
472 shouldRelease(true) |
|
473 { |
|
474 MOZ_GUARD_OBJECT_NOTIFIER_INIT; |
|
475 } |
|
476 |
|
477 ~LifoAllocScope() { |
|
478 if (shouldRelease) |
|
479 lifoAlloc->release(mark); |
|
480 } |
|
481 |
|
482 LifoAlloc &alloc() { |
|
483 return *lifoAlloc; |
|
484 } |
|
485 |
|
486 void releaseEarly() { |
|
487 JS_ASSERT(shouldRelease); |
|
488 lifoAlloc->release(mark); |
|
489 shouldRelease = false; |
|
490 } |
|
491 }; |
|
492 |
|
493 class LifoAllocPolicy |
|
494 { |
|
495 LifoAlloc &alloc_; |
|
496 |
|
497 public: |
|
498 LifoAllocPolicy(LifoAlloc &alloc) |
|
499 : alloc_(alloc) |
|
500 {} |
|
501 void *malloc_(size_t bytes) { |
|
502 return alloc_.alloc(bytes); |
|
503 } |
|
504 void *calloc_(size_t bytes) { |
|
505 void *p = malloc_(bytes); |
|
506 if (p) |
|
507 memset(p, 0, bytes); |
|
508 return p; |
|
509 } |
|
510 void *realloc_(void *p, size_t oldBytes, size_t bytes) { |
|
511 void *n = malloc_(bytes); |
|
512 if (!n) |
|
513 return n; |
|
514 memcpy(n, p, Min(oldBytes, bytes)); |
|
515 return n; |
|
516 } |
|
517 void free_(void *p) { |
|
518 } |
|
519 void reportAllocOverflow() const { |
|
520 } |
|
521 }; |
|
522 |
|
523 } // namespace js |
|
524 |
|
525 #endif /* ds_LifoAlloc_h */ |