Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it matters most.
There are some open questions about how to get FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "gc/Memory.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "js/HeapAPI.h" |
michael@0 | 10 | #include "vm/Runtime.h" |
michael@0 | 11 | |
michael@0 | 12 | using namespace js; |
michael@0 | 13 | using namespace js::gc; |
michael@0 | 14 | |
michael@0 | 15 | static bool |
michael@0 | 16 | DecommitEnabled(JSRuntime *rt) |
michael@0 | 17 | { |
michael@0 | 18 | return rt->gcSystemPageSize == ArenaSize; |
michael@0 | 19 | } |
michael@0 | 20 | |
michael@0 | 21 | #if defined(XP_WIN) |
michael@0 | 22 | #include "jswin.h" |
michael@0 | 23 | #include <psapi.h> |
michael@0 | 24 | |
michael@0 | 25 | void |
michael@0 | 26 | gc::InitMemorySubsystem(JSRuntime *rt) |
michael@0 | 27 | { |
michael@0 | 28 | SYSTEM_INFO sysinfo; |
michael@0 | 29 | GetSystemInfo(&sysinfo); |
michael@0 | 30 | rt->gcSystemPageSize = sysinfo.dwPageSize; |
michael@0 | 31 | rt->gcSystemAllocGranularity = sysinfo.dwAllocationGranularity; |
michael@0 | 32 | } |
michael@0 | 33 | |
/*
 * Reserve and commit |size| bytes of committed memory aligned to |alignment|.
 * Windows cannot request better-than-granularity alignment directly, so when
 * needed we over-reserve, compute an aligned address inside the reservation,
 * release it, and re-allocate at that exact address; see the loop below.
 * Returns nullptr on out-of-memory.
 */
void *
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
    JS_ASSERT(size >= alignment);
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size % rt->gcSystemPageSize == 0);
    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);

    /* Special case: If we want allocation alignment, no further work is needed. */
    if (alignment == rt->gcSystemAllocGranularity) {
        return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    }

    /*
     * Windows requires that there be a 1:1 mapping between VM allocation
     * and deallocation operations. Therefore, take care here to acquire the
     * final result via one mapping operation. This means unmapping any
     * preliminary result that is not correctly aligned.
     */
    void *p = nullptr;
    while (!p) {
        /*
         * Over-allocate in order to map a memory region that is definitely
         * large enough, then deallocate and allocate again the correct size,
         * within the over-sized mapping.
         *
         * Since we're going to unmap the whole thing anyway, the first
         * mapping doesn't have to commit pages.
         */
        size_t reserveSize = size + alignment - rt->gcSystemPageSize;
        p = VirtualAlloc(nullptr, reserveSize, MEM_RESERVE, PAGE_READWRITE);
        if (!p)
            return nullptr;
        void *chunkStart = (void *)AlignBytes(uintptr_t(p), alignment);
        /* Release the whole reservation, then re-claim just the aligned span. */
        UnmapPages(rt, p, reserveSize);
        p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

        /* Failure here indicates a race with another thread, so try again. */
        /* NOTE(review): there is no bound on retries under sustained contention. */
    }

    JS_ASSERT(uintptr_t(p) % alignment == 0);
    return p;
}
michael@0 | 77 | |
/*
 * Release a mapping created by MapAlignedPages.  |size| is unused: with
 * MEM_RELEASE, Windows requires size 0 and frees the entire original
 * allocation at |p|.
 */
void
gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
{
    JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
michael@0 | 83 | |
michael@0 | 84 | bool |
michael@0 | 85 | gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size) |
michael@0 | 86 | { |
michael@0 | 87 | if (!DecommitEnabled(rt)) |
michael@0 | 88 | return true; |
michael@0 | 89 | |
michael@0 | 90 | JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0); |
michael@0 | 91 | LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE); |
michael@0 | 92 | return p2 == p; |
michael@0 | 93 | } |
michael@0 | 94 | |
/*
 * Counterpart of MarkPagesUnused.  No explicit call is made here --
 * presumably pages handed to MEM_RESET become usable again on next touch
 * (see MarkPagesUnused above); this only validates alignment.
 */
bool
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
    return true;
}
michael@0 | 101 | |
michael@0 | 102 | size_t |
michael@0 | 103 | gc::GetPageFaultCount() |
michael@0 | 104 | { |
michael@0 | 105 | PROCESS_MEMORY_COUNTERS pmc; |
michael@0 | 106 | if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) |
michael@0 | 107 | return 0; |
michael@0 | 108 | return pmc.PageFaultCount; |
michael@0 | 109 | } |
michael@0 | 110 | |
/*
 * Map file-backed content for a memory-mapped array buffer.  Unimplemented
 * on Windows; always returns nullptr.
 */
void *
gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
    // TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
    return nullptr;
}
michael@0 | 117 | |
/*
 * Release a mapping returned by AllocateMappedContent.  No-op on Windows,
 * matching the unimplemented allocator above.
 */
void
gc::DeallocateMappedContent(void *p, size_t length)
{
    // TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
}
michael@0 | 124 | |
michael@0 | 125 | #elif defined(SOLARIS) |
michael@0 | 126 | |
michael@0 | 127 | #include <sys/mman.h> |
michael@0 | 128 | #include <unistd.h> |
michael@0 | 129 | |
michael@0 | 130 | #ifndef MAP_NOSYNC |
michael@0 | 131 | # define MAP_NOSYNC 0 |
michael@0 | 132 | #endif |
michael@0 | 133 | |
michael@0 | 134 | void |
michael@0 | 135 | gc::InitMemorySubsystem(JSRuntime *rt) |
michael@0 | 136 | { |
michael@0 | 137 | rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE)); |
michael@0 | 138 | } |
michael@0 | 139 | |
michael@0 | 140 | void * |
michael@0 | 141 | gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment) |
michael@0 | 142 | { |
michael@0 | 143 | JS_ASSERT(size >= alignment); |
michael@0 | 144 | JS_ASSERT(size % alignment == 0); |
michael@0 | 145 | JS_ASSERT(size % rt->gcSystemPageSize == 0); |
michael@0 | 146 | JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0); |
michael@0 | 147 | |
michael@0 | 148 | int prot = PROT_READ | PROT_WRITE; |
michael@0 | 149 | int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC; |
michael@0 | 150 | |
michael@0 | 151 | void *p = mmap((caddr_t)alignment, size, prot, flags, -1, 0); |
michael@0 | 152 | if (p == MAP_FAILED) |
michael@0 | 153 | return nullptr; |
michael@0 | 154 | return p; |
michael@0 | 155 | } |
michael@0 | 156 | |
michael@0 | 157 | void |
michael@0 | 158 | gc::UnmapPages(JSRuntime *rt, void *p, size_t size) |
michael@0 | 159 | { |
michael@0 | 160 | JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size)); |
michael@0 | 161 | } |
michael@0 | 162 | |
/*
 * Decommit hint: no actual decommit is performed on this platform; only the
 * page-alignment invariant is checked before reporting success.
 */
bool
gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
    return true;
}
michael@0 | 169 | |
/*
 * Counterpart of MarkPagesUnused.  Nothing to undo on this platform; only
 * the page-alignment invariant is checked.
 */
bool
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
    return true;
}
michael@0 | 176 | |
/* Page-fault statistics are not collected on this platform; always 0. */
size_t
gc::GetPageFaultCount()
{
    return 0;
}
michael@0 | 182 | |
/*
 * Map file-backed content for a memory-mapped array buffer.  Unimplemented
 * on this platform; always returns nullptr.
 */
void *
gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
{
    // Not implemented.
    return nullptr;
}
michael@0 | 189 | |
/*
 * Release a mapping returned by AllocateMappedContent.  No-op, matching the
 * unimplemented allocator above.
 */
void
gc::DeallocateMappedContent(void *p, size_t length)
{
    // Not implemented.
}
michael@0 | 196 | |
michael@0 | 197 | #elif defined(XP_UNIX) |
michael@0 | 198 | |
michael@0 | 199 | #include <algorithm> |
michael@0 | 200 | #include <sys/mman.h> |
michael@0 | 201 | #include <sys/resource.h> |
michael@0 | 202 | #include <sys/stat.h> |
michael@0 | 203 | #include <sys/types.h> |
michael@0 | 204 | #include <unistd.h> |
michael@0 | 205 | |
michael@0 | 206 | void |
michael@0 | 207 | gc::InitMemorySubsystem(JSRuntime *rt) |
michael@0 | 208 | { |
michael@0 | 209 | rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE)); |
michael@0 | 210 | } |
michael@0 | 211 | |
/*
 * Thin wrapper around mmap.  On most platforms it forwards directly; on
 * ia64 it constrains the returned address range (see below).  Note this
 * returns MAP_FAILED, not nullptr, on failure -- callers must check for
 * MAP_FAILED exactly as they would for raw mmap.
 */
static inline void *
MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
{
#if defined(__ia64__)
    /*
     * The JS engine assumes that all allocated pointers have their high 17 bits clear,
     * which ia64's mmap doesn't support directly. However, we can emulate it by passing
     * mmap an "addr" parameter with those bits clear. The mmap will return that address,
     * or the nearest available memory above that address, providing a near-guarantee
     * that those bits are clear. If they are not, we return nullptr below to indicate
     * out-of-memory.
     *
     * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
     * address space.
     *
     * See Bug 589735 for more information.
     */
    void *region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
    if (region == MAP_FAILED)
        return MAP_FAILED;
    /*
     * If the allocated memory doesn't have its upper 17 bits clear, consider it
     * as out of memory.
     */
    if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
        JS_ALWAYS_TRUE(0 == munmap(region, length));
        return MAP_FAILED;
    }
    return region;
#else
    return mmap(nullptr, length, prot, flags, fd, offset);
#endif
}
michael@0 | 245 | |
/*
 * Map |size| bytes at an address aligned to |alignment|.  When the system
 * granularity already satisfies the request we map directly; otherwise we
 * over-map and trim the misaligned front and the excess tail with munmap.
 * Returns nullptr on out-of-memory.
 */
void *
gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
{
    JS_ASSERT(size >= alignment);
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size % rt->gcSystemPageSize == 0);
    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);

    int prot = PROT_READ | PROT_WRITE;
    int flags = MAP_PRIVATE | MAP_ANON;

    /* Special case: If we want page alignment, no further work is needed. */
    if (alignment == rt->gcSystemAllocGranularity) {
        void *region = MapMemory(size, prot, flags, -1, 0);
        if (region == MAP_FAILED)
            return nullptr;
        return region;
    }

    /* Overallocate and unmap the region's edges. */
    size_t reqSize = Min(size + 2 * alignment, 2 * size);
    void *region = MapMemory(reqSize, prot, flags, -1, 0);
    if (region == MAP_FAILED)
        return nullptr;

    uintptr_t regionEnd = uintptr_t(region) + reqSize;
    uintptr_t offset = uintptr_t(region) % alignment;
    /* The aligned chunk must fit entirely inside the over-sized mapping. */
    JS_ASSERT(offset < reqSize - size);

    void *front = (void *)AlignBytes(uintptr_t(region), alignment);
    void *end = (void *)(uintptr_t(front) + size);
    /* Trim the misaligned head, if any. */
    if (front != region)
        JS_ALWAYS_TRUE(0 == munmap(region, alignment - offset));
    /* Trim whatever extends beyond the aligned chunk. */
    if (uintptr_t(end) != regionEnd)
        JS_ALWAYS_TRUE(0 == munmap(end, regionEnd - uintptr_t(end)));

    JS_ASSERT(uintptr_t(front) % alignment == 0);
    return front;
}
michael@0 | 285 | |
michael@0 | 286 | void |
michael@0 | 287 | gc::UnmapPages(JSRuntime *rt, void *p, size_t size) |
michael@0 | 288 | { |
michael@0 | 289 | JS_ALWAYS_TRUE(0 == munmap(p, size)); |
michael@0 | 290 | } |
michael@0 | 291 | |
michael@0 | 292 | bool |
michael@0 | 293 | gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size) |
michael@0 | 294 | { |
michael@0 | 295 | if (!DecommitEnabled(rt)) |
michael@0 | 296 | return false; |
michael@0 | 297 | |
michael@0 | 298 | JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0); |
michael@0 | 299 | int result = madvise(p, size, MADV_DONTNEED); |
michael@0 | 300 | return result != -1; |
michael@0 | 301 | } |
michael@0 | 302 | |
/*
 * Counterpart of MarkPagesUnused.  MADV_DONTNEED requires no explicit undo
 * before reuse, so this only validates alignment.
 */
bool
gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
{
    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
    return true;
}
michael@0 | 309 | |
michael@0 | 310 | size_t |
michael@0 | 311 | gc::GetPageFaultCount() |
michael@0 | 312 | { |
michael@0 | 313 | struct rusage usage; |
michael@0 | 314 | int err = getrusage(RUSAGE_SELF, &usage); |
michael@0 | 315 | if (err) |
michael@0 | 316 | return 0; |
michael@0 | 317 | return usage.ru_majflt; |
michael@0 | 318 | } |
michael@0 | 319 | |
michael@0 | 320 | void * |
michael@0 | 321 | gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment) |
michael@0 | 322 | { |
michael@0 | 323 | #define NEED_PAGE_ALIGNED 0 |
michael@0 | 324 | size_t pa_start; // Page aligned starting |
michael@0 | 325 | size_t pa_end; // Page aligned ending |
michael@0 | 326 | size_t pa_size; // Total page aligned size |
michael@0 | 327 | size_t page_size = sysconf(_SC_PAGESIZE); // Page size |
michael@0 | 328 | struct stat st; |
michael@0 | 329 | uint8_t *buf; |
michael@0 | 330 | |
michael@0 | 331 | // Make sure file exists and do sanity check for offset and size. |
michael@0 | 332 | if (fstat(fd, &st) < 0 || offset >= (size_t) st.st_size || |
michael@0 | 333 | length == 0 || length > (size_t) st.st_size - offset) |
michael@0 | 334 | return nullptr; |
michael@0 | 335 | |
michael@0 | 336 | // Check for minimal alignment requirement. |
michael@0 | 337 | #if NEED_PAGE_ALIGNED |
michael@0 | 338 | alignment = std::max(alignment, page_size); |
michael@0 | 339 | #endif |
michael@0 | 340 | if (offset & (alignment - 1)) |
michael@0 | 341 | return nullptr; |
michael@0 | 342 | |
michael@0 | 343 | // Page aligned starting of the offset. |
michael@0 | 344 | pa_start = offset & ~(page_size - 1); |
michael@0 | 345 | // Calculate page aligned ending by adding one page to the page aligned |
michael@0 | 346 | // starting of data end position(offset + length - 1). |
michael@0 | 347 | pa_end = ((offset + length - 1) & ~(page_size - 1)) + page_size; |
michael@0 | 348 | pa_size = pa_end - pa_start; |
michael@0 | 349 | |
michael@0 | 350 | // Ask for a continuous memory location. |
michael@0 | 351 | buf = (uint8_t *) MapMemory(pa_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); |
michael@0 | 352 | if (buf == MAP_FAILED) |
michael@0 | 353 | return nullptr; |
michael@0 | 354 | |
michael@0 | 355 | buf = (uint8_t *) mmap(buf, pa_size, PROT_READ | PROT_WRITE, |
michael@0 | 356 | MAP_PRIVATE | MAP_FIXED, fd, pa_start); |
michael@0 | 357 | if (buf == MAP_FAILED) |
michael@0 | 358 | return nullptr; |
michael@0 | 359 | |
michael@0 | 360 | // Reset the data before target file, which we don't need to see. |
michael@0 | 361 | memset(buf, 0, offset - pa_start); |
michael@0 | 362 | |
michael@0 | 363 | // Reset the data after target file, which we don't need to see. |
michael@0 | 364 | memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length)); |
michael@0 | 365 | |
michael@0 | 366 | return buf + (offset - pa_start); |
michael@0 | 367 | } |
michael@0 | 368 | |
michael@0 | 369 | void |
michael@0 | 370 | gc::DeallocateMappedContent(void *p, size_t length) |
michael@0 | 371 | { |
michael@0 | 372 | void *pa_start; // Page aligned starting |
michael@0 | 373 | size_t page_size = sysconf(_SC_PAGESIZE); // Page size |
michael@0 | 374 | size_t total_size; // Total allocated size |
michael@0 | 375 | |
michael@0 | 376 | pa_start = (void *)(uintptr_t(p) & ~(page_size - 1)); |
michael@0 | 377 | total_size = ((uintptr_t(p) + length) & ~(page_size - 1)) + page_size - uintptr_t(pa_start); |
michael@0 | 378 | munmap(pa_start, total_size); |
michael@0 | 379 | } |
michael@0 | 380 | |
michael@0 | 381 | #else |
michael@0 | 382 | #error "Memory mapping functions are not defined for your OS." |
michael@0 | 383 | #endif |