toolkit/crashreporter/google-breakpad/src/common/memory.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts, with some reservations
about how to convince FindCookie callers to test the condition and pass a
nullptr when double-key logic is disabled.
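
A minimal sketch of the condition described above; the helper name, signature,
and parameter names are illustrative assumptions, not the actual patch:

  // Hypothetical helper; aIsolateThirdParty would mirror the
  // privacy.thirdparty.isolate preference.
  static bool ShouldDoubleKeyCookies(bool aIsPrivateBrowsing,
                                     bool aIsolateThirdParty) {
    return aIsPrivateBrowsing || aIsolateThirdParty;
  }
  // FindCookie callers would pass the first-party key only when this returns
  // true, and a nullptr otherwise to disable double-key logic.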

// Copyright (c) 2009, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_BREAKPAD_COMMON_MEMORY_H_
#define GOOGLE_BREAKPAD_COMMON_MEMORY_H_

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#ifdef __APPLE__
#define sys_mmap mmap
#define sys_mmap2 mmap
#define sys_munmap munmap
#define MAP_ANONYMOUS MAP_ANON
#else
#include "third_party/lss/linux_syscall_support.h"
#endif

namespace google_breakpad {

// This is a very simple allocator which fetches pages from the kernel
// directly. Thus, it can be used even when the heap may be corrupted.
//
// There is no free operation. The pages are only freed when the object is
// destroyed.
class PageAllocator {
 public:
  PageAllocator()
      : page_size_(getpagesize()),
        last_(NULL),
        current_page_(NULL),
        page_offset_(0) {
  }

  ~PageAllocator() {
    FreeAll();
  }

  void *Alloc(unsigned bytes) {
    if (!bytes)
      return NULL;

    if (current_page_ && page_size_ - page_offset_ >= bytes) {
      uint8_t *const ret = current_page_ + page_offset_;
      page_offset_ += bytes;
      if (page_offset_ == page_size_) {
        page_offset_ = 0;
        current_page_ = NULL;
      }

      return ret;
    }

    const unsigned pages =
        (bytes + sizeof(PageHeader) + page_size_ - 1) / page_size_;
    uint8_t *const ret = GetNPages(pages);
    if (!ret)
      return NULL;

    page_offset_ =
        (page_size_ - (page_size_ * pages - (bytes + sizeof(PageHeader)))) %
        page_size_;
    current_page_ = page_offset_ ? ret + page_size_ * (pages - 1) : NULL;

    return ret + sizeof(PageHeader);
  }

 private:
  uint8_t *GetNPages(unsigned num_pages) {
#ifdef __x86_64
    void *a = sys_mmap(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
    void *a = sys_mmap2(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
    if (a == MAP_FAILED)
      return NULL;

    struct PageHeader *header = reinterpret_cast<PageHeader*>(a);
    header->next = last_;
    header->num_pages = num_pages;
    last_ = header;

    return reinterpret_cast<uint8_t*>(a);
  }

  void FreeAll() {
    PageHeader *next;

    for (PageHeader *cur = last_; cur; cur = next) {
      next = cur->next;
      sys_munmap(cur, cur->num_pages * page_size_);
    }
  }

  struct PageHeader {
    PageHeader *next;    // pointer to the start of the next set of pages.
    unsigned num_pages;  // the number of pages in this set.
  };

  const unsigned page_size_;
  PageHeader *last_;
  uint8_t *current_page_;
  unsigned page_offset_;
};
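
// A minimal usage sketch, kept as a comment: scratch memory can be obtained
// at crash time without touching the (possibly corrupted) process heap.
//
//   google_breakpad::PageAllocator allocator;
//   // Alloc() returns NULL for zero-byte requests or when mmap fails.
//   uint8_t* scratch = static_cast<uint8_t*>(allocator.Alloc(4096));
//   // There is no per-allocation free; every mapping is released when
//   // |allocator| is destroyed.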

// A wasteful vector is like a normal std::vector, except that it's much
// simpler and it allocates memory from a PageAllocator. It's wasteful
// because, when resizing, it always allocates a whole new array since the
// PageAllocator doesn't support realloc.
template<class T>
class wasteful_vector {
 public:
  wasteful_vector(PageAllocator *allocator, unsigned size_hint = 16)
      : allocator_(allocator),
        a_((T*) allocator->Alloc(sizeof(T) * size_hint)),
        allocated_(size_hint),
        used_(0) {
  }

  T& back() {
    return a_[used_ - 1];
  }

  const T& back() const {
    return a_[used_ - 1];
  }

  bool empty() const {
    return used_ == 0;
  }

  void push_back(const T& new_element) {
    if (used_ == allocated_)
      Realloc(allocated_ * 2);
    a_[used_++] = new_element;
  }

  size_t size() const {
    return used_;
  }

  void resize(unsigned sz, T c = T()) {
    // No need to test "sz >= 0", as "sz" is unsigned.
    if (sz <= used_) {
      used_ = sz;
    } else {
      unsigned a = allocated_;
      if (sz > a) {
        while (sz > a) {
          a *= 2;
        }
        Realloc(a);
      }
      while (sz > used_) {
        a_[used_++] = c;
      }
    }
  }

  T& operator[](size_t index) {
    return a_[index];
  }

  const T& operator[](size_t index) const {
    return a_[index];
  }

 private:
  void Realloc(unsigned new_size) {
    T *new_array =
        reinterpret_cast<T*>(allocator_->Alloc(sizeof(T) * new_size));
    memcpy(new_array, a_, used_ * sizeof(T));
    a_ = new_array;
    allocated_ = new_size;
  }

  PageAllocator *const allocator_;
  T *a_;                // pointer to an array of |allocated_| elements.
  unsigned allocated_;  // size of |a_|, in elements.
  unsigned used_;       // number of used slots in |a_|.
};
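
// A minimal usage sketch, kept as a comment; it only exercises members that
// the template above actually defines.
//
//   google_breakpad::PageAllocator allocator;
//   google_breakpad::wasteful_vector<int> sizes(&allocator);
//   sizes.push_back(4096);          // grows by doubling via Realloc().
//   sizes.resize(8, 0);             // pads new slots with the fill value.
//   const int last = sizes.back();  // == 0 here.
//
// Because elements are copied with memcpy on growth, this container is only
// suitable for POD-style element types.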

}  // namespace google_breakpad

inline void* operator new(size_t nbytes,
                          google_breakpad::PageAllocator& allocator) {
  return allocator.Alloc(nbytes);
}
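
// A minimal usage sketch for the placement form above, kept as a comment;
// |CrashRecord| is a hypothetical caller-defined type, not part of this
// header.
//
//   google_breakpad::PageAllocator allocator;
//   CrashRecord* rec = new(allocator) CrashRecord();
//
// Note that there is no matching operator delete: destructors are never run,
// and the memory is reclaimed only when |allocator| itself is destroyed.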

#endif  // GOOGLE_BREAKPAD_COMMON_MEMORY_H_
