Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double key logic is disabled.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef ds_LifoAlloc_h
8 #define ds_LifoAlloc_h
10 #include "mozilla/DebugOnly.h"
11 #include "mozilla/MathAlgorithms.h"
12 #include "mozilla/MemoryChecking.h"
13 #include "mozilla/MemoryReporting.h"
14 #include "mozilla/PodOperations.h"
15 #include "mozilla/TemplateLib.h"
16 #include "mozilla/TypeTraits.h"
18 // This data structure supports stacky LIFO allocation (mark/release and
19 // LifoAllocScope). It does not maintain one contiguous segment; instead, it
20 // maintains a bunch of linked memory segments. In order to prevent malloc/free
21 // thrashing, unused segments are deallocated when garbage collection occurs.
23 #include "jsutil.h"
25 namespace js {
27 namespace detail {
29 static const size_t LIFO_ALLOC_ALIGN = 8;
31 MOZ_ALWAYS_INLINE
32 char *
33 AlignPtr(void *orig)
34 {
35 static_assert(mozilla::tl::FloorLog2<LIFO_ALLOC_ALIGN>::value ==
36 mozilla::tl::CeilingLog2<LIFO_ALLOC_ALIGN>::value,
37 "LIFO_ALLOC_ALIGN must be a power of two");
39 char *result = (char *) ((uintptr_t(orig) + (LIFO_ALLOC_ALIGN - 1)) & (~LIFO_ALLOC_ALIGN + 1));
40 JS_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
41 return result;
42 }
44 // Header for a chunk of memory wrangled by the LifoAlloc.
45 class BumpChunk
46 {
47 char *bump; // start of the available data
48 char *limit; // end of the data
49 BumpChunk *next_; // the next BumpChunk
50 size_t bumpSpaceSize; // size of the data area
52 char *headerBase() { return reinterpret_cast<char *>(this); }
53 char *bumpBase() const { return limit - bumpSpaceSize; }
55 explicit BumpChunk(size_t bumpSpaceSize)
56 : bump(reinterpret_cast<char *>(MOZ_THIS_IN_INITIALIZER_LIST()) + sizeof(BumpChunk)),
57 limit(bump + bumpSpaceSize),
58 next_(nullptr), bumpSpaceSize(bumpSpaceSize)
59 {
60 JS_ASSERT(bump == AlignPtr(bump));
61 }
63 void setBump(void *ptr) {
64 JS_ASSERT(bumpBase() <= ptr);
65 JS_ASSERT(ptr <= limit);
66 #if defined(DEBUG) || defined(MOZ_HAVE_MEM_CHECKS)
67 char* prevBump = bump;
68 #endif
69 bump = static_cast<char *>(ptr);
70 #ifdef DEBUG
71 JS_ASSERT(contains(prevBump));
73 // Clobber the now-free space.
74 if (prevBump > bump)
75 memset(bump, 0xcd, prevBump - bump);
76 #endif
78 // Poison/Unpoison memory that we just free'd/allocated.
79 #if defined(MOZ_HAVE_MEM_CHECKS)
80 if (prevBump > bump)
81 MOZ_MAKE_MEM_NOACCESS(bump, prevBump - bump);
82 else if (bump > prevBump)
83 MOZ_MAKE_MEM_UNDEFINED(prevBump, bump - prevBump);
84 #endif
85 }
87 public:
88 BumpChunk *next() const { return next_; }
89 void setNext(BumpChunk *succ) { next_ = succ; }
91 size_t used() const { return bump - bumpBase(); }
93 void *start() const { return bumpBase(); }
94 void *end() const { return limit; }
96 size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
97 return mallocSizeOf(this);
98 }
100 size_t computedSizeOfIncludingThis() {
101 return limit - headerBase();
102 }
104 void resetBump() {
105 setBump(headerBase() + sizeof(BumpChunk));
106 }
108 void *mark() const { return bump; }
110 void release(void *mark) {
111 JS_ASSERT(contains(mark));
112 JS_ASSERT(mark <= bump);
113 setBump(mark);
114 }
116 bool contains(void *mark) const {
117 return bumpBase() <= mark && mark <= limit;
118 }
120 bool canAlloc(size_t n);
122 size_t unused() {
123 return limit - AlignPtr(bump);
124 }
126 // Try to perform an allocation of size |n|, return null if not possible.
127 MOZ_ALWAYS_INLINE
128 void *tryAlloc(size_t n) {
129 char *aligned = AlignPtr(bump);
130 char *newBump = aligned + n;
132 if (newBump > limit)
133 return nullptr;
135 // Check for overflow.
136 if (MOZ_UNLIKELY(newBump < bump))
137 return nullptr;
139 JS_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
140 setBump(newBump);
141 return aligned;
142 }
144 static BumpChunk *new_(size_t chunkSize);
145 static void delete_(BumpChunk *chunk);
146 };
148 } // namespace detail
150 // LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
151 //
152 // Note: |latest| is not necessary "last". We leave BumpChunks latent in the
153 // chain after they've been released to avoid thrashing before a GC.
154 class LifoAlloc
155 {
156 typedef detail::BumpChunk BumpChunk;
158 BumpChunk *first;
159 BumpChunk *latest;
160 BumpChunk *last;
161 size_t markCount;
162 size_t defaultChunkSize_;
163 size_t curSize_;
164 size_t peakSize_;
166 void operator=(const LifoAlloc &) MOZ_DELETE;
167 LifoAlloc(const LifoAlloc &) MOZ_DELETE;
169 // Return a BumpChunk that can perform an allocation of at least size |n|
170 // and add it to the chain appropriately.
171 //
172 // Side effect: if retval is non-null, |first| and |latest| are initialized
173 // appropriately.
174 BumpChunk *getOrCreateChunk(size_t n);
176 void reset(size_t defaultChunkSize) {
177 JS_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
178 first = latest = last = nullptr;
179 defaultChunkSize_ = defaultChunkSize;
180 markCount = 0;
181 curSize_ = 0;
182 }
184 // Append unused chunks to the end of this LifoAlloc.
185 void appendUnused(BumpChunk *start, BumpChunk *end) {
186 JS_ASSERT(start && end);
187 if (last)
188 last->setNext(start);
189 else
190 first = latest = start;
191 last = end;
192 }
194 // Append used chunks to the end of this LifoAlloc. We act as if all the
195 // chunks in |this| are used, even if they're not, so memory may be wasted.
196 void appendUsed(BumpChunk *start, BumpChunk *latest, BumpChunk *end) {
197 JS_ASSERT(start && latest && end);
198 if (last)
199 last->setNext(start);
200 else
201 first = latest = start;
202 last = end;
203 this->latest = latest;
204 }
206 void incrementCurSize(size_t size) {
207 curSize_ += size;
208 if (curSize_ > peakSize_)
209 peakSize_ = curSize_;
210 }
211 void decrementCurSize(size_t size) {
212 JS_ASSERT(curSize_ >= size);
213 curSize_ -= size;
214 }
216 public:
217 explicit LifoAlloc(size_t defaultChunkSize)
218 : peakSize_(0)
219 {
220 reset(defaultChunkSize);
221 }
223 // Steal allocated chunks from |other|.
224 void steal(LifoAlloc *other) {
225 JS_ASSERT(!other->markCount);
227 // Copy everything from |other| to |this| except for |peakSize_|, which
228 // requires some care.
229 size_t oldPeakSize = peakSize_;
230 mozilla::PodAssign(this, other);
231 peakSize_ = Max(oldPeakSize, curSize_);
233 other->reset(defaultChunkSize_);
234 }
236 // Append all chunks from |other|. They are removed from |other|.
237 void transferFrom(LifoAlloc *other);
239 // Append unused chunks from |other|. They are removed from |other|.
240 void transferUnusedFrom(LifoAlloc *other);
242 ~LifoAlloc() { freeAll(); }
244 size_t defaultChunkSize() const { return defaultChunkSize_; }
246 // Frees all held memory.
247 void freeAll();
249 static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
250 void freeAllIfHugeAndUnused() {
251 if (markCount == 0 && curSize_ > HUGE_ALLOCATION)
252 freeAll();
253 }
255 MOZ_ALWAYS_INLINE
256 void *alloc(size_t n) {
257 JS_OOM_POSSIBLY_FAIL();
259 void *result;
260 if (latest && (result = latest->tryAlloc(n)))
261 return result;
263 if (!getOrCreateChunk(n))
264 return nullptr;
266 // Since we just created a large enough chunk, this can't fail.
267 result = latest->tryAlloc(n);
268 MOZ_ASSERT(result);
269 return result;
270 }
272 // Ensures that enough space exists to satisfy N bytes worth of
273 // allocation requests, not necessarily contiguous. Note that this does
274 // not guarantee a successful single allocation of N bytes.
275 MOZ_ALWAYS_INLINE
276 bool ensureUnusedApproximate(size_t n) {
277 size_t total = 0;
278 for (BumpChunk *chunk = latest; chunk; chunk = chunk->next()) {
279 total += chunk->unused();
280 if (total >= n)
281 return true;
282 }
283 BumpChunk *latestBefore = latest;
284 if (!getOrCreateChunk(n))
285 return false;
286 if (latestBefore)
287 latest = latestBefore;
288 return true;
289 }
291 template <typename T>
292 T *newArray(size_t count) {
293 JS_STATIC_ASSERT(mozilla::IsPod<T>::value);
294 return newArrayUninitialized<T>(count);
295 }
297 // Create an array with uninitialized elements of type |T|.
298 // The caller is responsible for initialization.
299 template <typename T>
300 T *newArrayUninitialized(size_t count) {
301 if (count & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
302 return nullptr;
303 return static_cast<T *>(alloc(sizeof(T) * count));
304 }
306 class Mark {
307 BumpChunk *chunk;
308 void *markInChunk;
309 friend class LifoAlloc;
310 Mark(BumpChunk *chunk, void *markInChunk) : chunk(chunk), markInChunk(markInChunk) {}
311 public:
312 Mark() : chunk(nullptr), markInChunk(nullptr) {}
313 };
315 Mark mark() {
316 markCount++;
317 return latest ? Mark(latest, latest->mark()) : Mark();
318 }
320 void release(Mark mark) {
321 markCount--;
322 if (!mark.chunk) {
323 latest = first;
324 if (latest)
325 latest->resetBump();
326 } else {
327 latest = mark.chunk;
328 latest->release(mark.markInChunk);
329 }
330 }
332 void releaseAll() {
333 JS_ASSERT(!markCount);
334 latest = first;
335 if (latest)
336 latest->resetBump();
337 }
339 // Get the total "used" (occupied bytes) count for the arena chunks.
340 size_t used() const {
341 size_t accum = 0;
342 for (BumpChunk *chunk = first; chunk; chunk = chunk->next()) {
343 accum += chunk->used();
344 if (chunk == latest)
345 break;
346 }
347 return accum;
348 }
350 // Return true if the LifoAlloc does not currently contain any allocations.
351 bool isEmpty() const {
352 return !latest || !latest->used();
353 }
355 // Return the number of bytes remaining to allocate in the current chunk.
356 // e.g. How many bytes we can allocate before needing a new block.
357 size_t availableInCurrentChunk() const {
358 if (!latest)
359 return 0;
360 return latest->unused();
361 }
363 // Get the total size of the arena chunks (including unused space).
364 size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
365 size_t n = 0;
366 for (BumpChunk *chunk = first; chunk; chunk = chunk->next())
367 n += chunk->sizeOfIncludingThis(mallocSizeOf);
368 return n;
369 }
371 // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
372 size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
373 return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
374 }
376 // Get the peak size of the arena chunks (including unused space and
377 // bookkeeping space).
378 size_t peakSizeOfExcludingThis() const { return peakSize_; }
380 // Doesn't perform construction; useful for lazily-initialized POD types.
381 template <typename T>
382 MOZ_ALWAYS_INLINE
383 T *newPod() {
384 return static_cast<T *>(alloc(sizeof(T)));
385 }
387 JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
389 // A mutable enumeration of the allocated data.
390 class Enum
391 {
392 friend class LifoAlloc;
393 friend class detail::BumpChunk;
395 LifoAlloc *alloc_; // The LifoAlloc being traversed.
396 BumpChunk *chunk_; // The current chunk.
397 char *position_; // The current position (must be within chunk_).
399 // If there is not enough room in the remaining block for |size|,
400 // advance to the next block and update the position.
401 void ensureSpaceAndAlignment(size_t size) {
402 JS_ASSERT(!empty());
403 char *aligned = detail::AlignPtr(position_);
404 if (aligned + size > chunk_->end()) {
405 chunk_ = chunk_->next();
406 position_ = static_cast<char *>(chunk_->start());
407 } else {
408 position_ = aligned;
409 }
410 JS_ASSERT(uintptr_t(position_) + size <= uintptr_t(chunk_->end()));
411 }
413 public:
414 Enum(LifoAlloc &alloc)
415 : alloc_(&alloc),
416 chunk_(alloc.first),
417 position_(static_cast<char *>(alloc.first ? alloc.first->start() : nullptr))
418 {}
420 // Return true if there are no more bytes to enumerate.
421 bool empty() {
422 return !chunk_ || (chunk_ == alloc_->latest && position_ >= chunk_->mark());
423 }
425 // Move the read position forward by the size of one T.
426 template <typename T>
427 void popFront() {
428 popFront(sizeof(T));
429 }
431 // Move the read position forward by |size| bytes.
432 void popFront(size_t size) {
433 ensureSpaceAndAlignment(size);
434 position_ = position_ + size;
435 }
437 // Update the bytes at the current position with a new value.
438 template <typename T>
439 void updateFront(const T &t) {
440 ensureSpaceAndAlignment(sizeof(T));
441 memmove(position_, &t, sizeof(T));
442 }
444 // Return a pointer to the item at the current position. This
445 // returns a pointer to the inline storage, not a copy.
446 template <typename T>
447 T *get(size_t size = sizeof(T)) {
448 ensureSpaceAndAlignment(size);
449 return reinterpret_cast<T *>(position_);
450 }
452 // Return a Mark at the current position of the Enum.
453 Mark mark() {
454 alloc_->markCount++;
455 return Mark(chunk_, position_);
456 }
457 };
458 };
460 class LifoAllocScope
461 {
462 LifoAlloc *lifoAlloc;
463 LifoAlloc::Mark mark;
464 bool shouldRelease;
465 MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
467 public:
468 explicit LifoAllocScope(LifoAlloc *lifoAlloc
469 MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
470 : lifoAlloc(lifoAlloc),
471 mark(lifoAlloc->mark()),
472 shouldRelease(true)
473 {
474 MOZ_GUARD_OBJECT_NOTIFIER_INIT;
475 }
477 ~LifoAllocScope() {
478 if (shouldRelease)
479 lifoAlloc->release(mark);
480 }
482 LifoAlloc &alloc() {
483 return *lifoAlloc;
484 }
486 void releaseEarly() {
487 JS_ASSERT(shouldRelease);
488 lifoAlloc->release(mark);
489 shouldRelease = false;
490 }
491 };
493 class LifoAllocPolicy
494 {
495 LifoAlloc &alloc_;
497 public:
498 LifoAllocPolicy(LifoAlloc &alloc)
499 : alloc_(alloc)
500 {}
501 void *malloc_(size_t bytes) {
502 return alloc_.alloc(bytes);
503 }
504 void *calloc_(size_t bytes) {
505 void *p = malloc_(bytes);
506 if (p)
507 memset(p, 0, bytes);
508 return p;
509 }
510 void *realloc_(void *p, size_t oldBytes, size_t bytes) {
511 void *n = malloc_(bytes);
512 if (!n)
513 return n;
514 memcpy(n, p, Min(oldBytes, bytes));
515 return n;
516 }
517 void free_(void *p) {
518 }
519 void reportAllocOverflow() const {
520 }
521 };
523 } // namespace js
525 #endif /* ds_LifoAlloc_h */