Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double key logic according to
private browsing mode or the privacy.thirdparty.isolate preference, and
implement it in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double key logic is disabled.
1 // Copyright (c) 2009, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #ifndef GOOGLE_BREAKPAD_COMMON_MEMORY_H_
31 #define GOOGLE_BREAKPAD_COMMON_MEMORY_H_
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
38 #ifdef __APPLE__
39 #define sys_mmap mmap
40 #define sys_mmap2 mmap
41 #define sys_munmap munmap
42 #define MAP_ANONYMOUS MAP_ANON
43 #else
44 #include "third_party/lss/linux_syscall_support.h"
45 #endif
47 namespace google_breakpad {
49 // This is very simple allocator which fetches pages from the kernel directly.
50 // Thus, it can be used even when the heap may be corrupted.
51 //
52 // There is no free operation. The pages are only freed when the object is
53 // destroyed.
54 class PageAllocator {
55 public:
56 PageAllocator()
57 : page_size_(getpagesize()),
58 last_(NULL),
59 current_page_(NULL),
60 page_offset_(0) {
61 }
63 ~PageAllocator() {
64 FreeAll();
65 }
67 void *Alloc(unsigned bytes) {
68 if (!bytes)
69 return NULL;
71 if (current_page_ && page_size_ - page_offset_ >= bytes) {
72 uint8_t *const ret = current_page_ + page_offset_;
73 page_offset_ += bytes;
74 if (page_offset_ == page_size_) {
75 page_offset_ = 0;
76 current_page_ = NULL;
77 }
79 return ret;
80 }
82 const unsigned pages =
83 (bytes + sizeof(PageHeader) + page_size_ - 1) / page_size_;
84 uint8_t *const ret = GetNPages(pages);
85 if (!ret)
86 return NULL;
88 page_offset_ = (page_size_ - (page_size_ * pages - (bytes + sizeof(PageHeader)))) % page_size_;
89 current_page_ = page_offset_ ? ret + page_size_ * (pages - 1) : NULL;
91 return ret + sizeof(PageHeader);
92 }
94 private:
95 uint8_t *GetNPages(unsigned num_pages) {
96 #ifdef __x86_64
97 void *a = sys_mmap(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
98 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
99 #else
100 void *a = sys_mmap2(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
101 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
102 #endif
103 if (a == MAP_FAILED)
104 return NULL;
106 struct PageHeader *header = reinterpret_cast<PageHeader*>(a);
107 header->next = last_;
108 header->num_pages = num_pages;
109 last_ = header;
111 return reinterpret_cast<uint8_t*>(a);
112 }
114 void FreeAll() {
115 PageHeader *next;
117 for (PageHeader *cur = last_; cur; cur = next) {
118 next = cur->next;
119 sys_munmap(cur, cur->num_pages * page_size_);
120 }
121 }
123 struct PageHeader {
124 PageHeader *next; // pointer to the start of the next set of pages.
125 unsigned num_pages; // the number of pages in this set.
126 };
128 const unsigned page_size_;
129 PageHeader *last_;
130 uint8_t *current_page_;
131 unsigned page_offset_;
132 };
134 // A wasteful vector is like a normal std::vector, except that it's very much
135 // simplier and it allocates memory from a PageAllocator. It's wasteful
136 // because, when resizing, it always allocates a whole new array since the
137 // PageAllocator doesn't support realloc.
138 template<class T>
139 class wasteful_vector {
140 public:
141 wasteful_vector(PageAllocator *allocator, unsigned size_hint = 16)
142 : allocator_(allocator),
143 a_((T*) allocator->Alloc(sizeof(T) * size_hint)),
144 allocated_(size_hint),
145 used_(0) {
146 }
148 T& back() {
149 return a_[used_ - 1];
150 }
152 const T& back() const {
153 return a_[used_ - 1];
154 }
156 bool empty() const {
157 return used_ == 0;
158 }
160 void push_back(const T& new_element) {
161 if (used_ == allocated_)
162 Realloc(allocated_ * 2);
163 a_[used_++] = new_element;
164 }
166 size_t size() const {
167 return used_;
168 }
170 void resize(unsigned sz, T c = T()) {
171 // No need to test "sz >= 0", as "sz" is unsigned.
172 if (sz <= used_) {
173 used_ = sz;
174 } else {
175 unsigned a = allocated_;
176 if (sz > a) {
177 while (sz > a) {
178 a *= 2;
179 }
180 Realloc(a);
181 }
182 while (sz > used_) {
183 a_[used_++] = c;
184 }
185 }
186 }
188 T& operator[](size_t index) {
189 return a_[index];
190 }
192 const T& operator[](size_t index) const {
193 return a_[index];
194 }
196 private:
197 void Realloc(unsigned new_size) {
198 T *new_array =
199 reinterpret_cast<T*>(allocator_->Alloc(sizeof(T) * new_size));
200 memcpy(new_array, a_, used_ * sizeof(T));
201 a_ = new_array;
202 allocated_ = new_size;
203 }
205 PageAllocator *const allocator_;
206 T *a_; // pointer to an array of |allocated_| elements.
207 unsigned allocated_; // size of |a_|, in elements.
208 unsigned used_; // number of used slots in |a_|.
209 };
211 } // namespace google_breakpad
213 inline void* operator new(size_t nbytes,
214 google_breakpad::PageAllocator& allocator) {
215 return allocator.Alloc(nbytes);
216 }
218 #endif // GOOGLE_BREAKPAD_COMMON_MEMORY_H_