Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
michael@0 | 3 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "mozilla/AvailableMemoryTracker.h" |
michael@0 | 8 | |
michael@0 | 9 | #if defined(XP_WIN) |
michael@0 | 10 | #include "prinrval.h" |
michael@0 | 11 | #include "prenv.h" |
michael@0 | 12 | #include "nsIMemoryReporter.h" |
michael@0 | 13 | #include "nsMemoryPressure.h" |
michael@0 | 14 | #endif |
michael@0 | 15 | |
michael@0 | 16 | #include "nsIObserver.h" |
michael@0 | 17 | #include "nsIObserverService.h" |
michael@0 | 18 | #include "nsIRunnable.h" |
michael@0 | 19 | #include "nsISupports.h" |
michael@0 | 20 | #include "nsThreadUtils.h" |
michael@0 | 21 | |
michael@0 | 22 | #include "mozilla/Preferences.h" |
michael@0 | 23 | #include "mozilla/Services.h" |
michael@0 | 24 | |
michael@0 | 25 | #if defined(XP_WIN) |
michael@0 | 26 | # include "nsWindowsDllInterceptor.h" |
michael@0 | 27 | # include <windows.h> |
michael@0 | 28 | #endif |
michael@0 | 29 | |
michael@0 | 30 | #if defined(MOZ_MEMORY) |
michael@0 | 31 | # include "mozmemory.h" |
michael@0 | 32 | #endif // MOZ_MEMORY |
michael@0 | 33 | |
michael@0 | 34 | using namespace mozilla; |
michael@0 | 35 | |
michael@0 | 36 | namespace { |
michael@0 | 37 | |
michael@0 | 38 | #if defined(XP_WIN) |
michael@0 | 39 | |
// We don't want our diagnostic functions to call malloc, because that could
// call VirtualAlloc, and we'd end up back in here! So here are a few simple
// debugging macros (modeled on jemalloc's), which hopefully won't allocate.

// Uncomment to turn the LOG* macros below into real output; when left
// commented out they expand to nothing, so logging costs nothing in normal
// builds.
// #define LOGGING_ENABLED

#ifdef LOGGING_ENABLED

// Write one message plus a trailing newline, allocation-free.
#define LOG(msg)        \
  do {                  \
    safe_write(msg);    \
    safe_write("\n");   \
  } while(0)

// Two-part message; parts may be strings or uint64_t values (the safe_write
// overloads handle both).
#define LOG2(m1, m2)    \
  do {                  \
    safe_write(m1);     \
    safe_write(m2);     \
    safe_write("\n");   \
  } while(0)

// Three-part message.
#define LOG3(m1, m2, m3) \
  do {                   \
    safe_write(m1);      \
    safe_write(m2);      \
    safe_write(m3);      \
    safe_write("\n");    \
  } while(0)

// Four-part message.
#define LOG4(m1, m2, m3, m4) \
  do {                       \
    safe_write(m1);          \
    safe_write(m2);          \
    safe_write(m3);          \
    safe_write(m4);          \
    safe_write("\n");        \
  } while(0)

#else

// Logging disabled: the macros expand to nothing.
#define LOG(msg)
#define LOG2(m1, m2)
#define LOG3(m1, m2, m3)
#define LOG4(m1, m2, m3, m4)

#endif
michael@0 | 86 | |
// Write a C string to stdout without allocating on our side; used by the
// LOG* and DEBUG_WARN_IF_FALSE diagnostics, which must not re-enter the
// allocation hooks.
void safe_write(const char *a)
{
  // Well, puts isn't exactly "safe", but at least it doesn't call malloc...
  fputs(a, stdout);
}
michael@0 | 92 | |
michael@0 | 93 | void safe_write(uint64_t x) |
michael@0 | 94 | { |
michael@0 | 95 | // 2^64 is 20 decimal digits. |
michael@0 | 96 | const unsigned int max_len = 21; |
michael@0 | 97 | char buf[max_len]; |
michael@0 | 98 | buf[max_len - 1] = '\0'; |
michael@0 | 99 | |
michael@0 | 100 | uint32_t i; |
michael@0 | 101 | for (i = max_len - 2; i < max_len && x > 0; i--) |
michael@0 | 102 | { |
michael@0 | 103 | buf[i] = "0123456789"[x % 10]; |
michael@0 | 104 | x /= 10; |
michael@0 | 105 | } |
michael@0 | 106 | |
michael@0 | 107 | safe_write(&buf[i + 1]); |
michael@0 | 108 | } |
michael@0 | 109 | |
// In DEBUG builds, print "file:line msg" through the allocation-free
// safe_write helpers when |cond| is false.  Expands to nothing in release
// builds, so |cond| is not even evaluated there.
#ifdef DEBUG
#define DEBUG_WARN_IF_FALSE(cond, msg) \
  do {                                 \
    if (!(cond)) {                     \
      safe_write(__FILE__);            \
      safe_write(":");                 \
      safe_write(__LINE__);            \
      safe_write(" ");                 \
      safe_write(msg);                 \
      safe_write("\n");                \
    }                                  \
  } while(0)
#else
#define DEBUG_WARN_IF_FALSE(cond, msg)
#endif
michael@0 | 125 | |
// Low-memory thresholds, in megabytes, mirrored from the corresponding
// "memory.low_*_threshold_mb" prefs in Activate().  A value of 0 disables
// the corresponding check in CheckMemAvailable().
uint32_t sLowVirtualMemoryThreshold = 0;
uint32_t sLowCommitSpaceThreshold = 0;
uint32_t sLowPhysicalMemoryThreshold = 0;
// Minimum spacing between two scheduled low-memory notifications, in ms.
uint32_t sLowMemoryNotificationIntervalMS = 0;

// Event counters surfaced by LowEventsReporter.  Atomic because they are
// bumped from the allocation hooks, which can run on any thread.
Atomic<uint32_t> sNumLowVirtualMemEvents;
Atomic<uint32_t> sNumLowCommitSpaceEvents;
Atomic<uint32_t> sNumLowPhysicalMemEvents;

// Interceptors through which Init() patches the kernel32/gdi32 entry points.
WindowsDllInterceptor sKernel32Intercept;
WindowsDllInterceptor sGdi32Intercept;

// Has Init() been called?
bool sInitialized = false;

// Has Activate() been called? The hooks don't do anything until this happens.
bool sHooksActive = false;

// Alas, we'd like to use mozilla::TimeStamp, but we can't, because it acquires
// a lock!
volatile bool sHasScheduledOneLowMemoryNotification = false;
volatile PRIntervalTime sLastLowMemoryNotificationTime;

// These are function pointers to the functions we wrap in Init().

void* (WINAPI *sVirtualAllocOrig)
  (LPVOID aAddress, SIZE_T aSize, DWORD aAllocationType, DWORD aProtect);

void* (WINAPI *sMapViewOfFileOrig)
  (HANDLE aFileMappingObject, DWORD aDesiredAccess,
   DWORD aFileOffsetHigh, DWORD aFileOffsetLow,
   SIZE_T aNumBytesToMap);

HBITMAP (WINAPI *sCreateDIBSectionOrig)
  (HDC aDC, const BITMAPINFO *aBitmapInfo,
   UINT aUsage, VOID **aBits,
   HANDLE aSection, DWORD aOffset);
michael@0 | 163 | |
michael@0 | 164 | /** |
michael@0 | 165 | * Fire a memory pressure event if it's been long enough since the last one we |
michael@0 | 166 | * fired. |
michael@0 | 167 | */ |
michael@0 | 168 | bool MaybeScheduleMemoryPressureEvent() |
michael@0 | 169 | { |
michael@0 | 170 | // If this interval rolls over, we may fire an extra memory pressure |
michael@0 | 171 | // event, but that's not a big deal. |
michael@0 | 172 | PRIntervalTime interval = PR_IntervalNow() - sLastLowMemoryNotificationTime; |
michael@0 | 173 | if (sHasScheduledOneLowMemoryNotification && |
michael@0 | 174 | PR_IntervalToMilliseconds(interval) < sLowMemoryNotificationIntervalMS) { |
michael@0 | 175 | |
michael@0 | 176 | LOG("Not scheduling low physical memory notification, " |
michael@0 | 177 | "because not enough time has elapsed since last one."); |
michael@0 | 178 | return false; |
michael@0 | 179 | } |
michael@0 | 180 | |
michael@0 | 181 | // There's a bit of a race condition here, since an interval may be a |
michael@0 | 182 | // 64-bit number, and 64-bit writes aren't atomic on x86-32. But let's |
michael@0 | 183 | // not worry about it -- the races only happen when we're already |
michael@0 | 184 | // experiencing memory pressure and firing notifications, so the worst |
michael@0 | 185 | // thing that can happen is that we fire two notifications when we |
michael@0 | 186 | // should have fired only one. |
michael@0 | 187 | sHasScheduledOneLowMemoryNotification = true; |
michael@0 | 188 | sLastLowMemoryNotificationTime = PR_IntervalNow(); |
michael@0 | 189 | |
michael@0 | 190 | LOG("Scheduling memory pressure notification."); |
michael@0 | 191 | NS_DispatchEventualMemoryPressure(MemPressure_New); |
michael@0 | 192 | return true; |
michael@0 | 193 | } |
michael@0 | 194 | |
michael@0 | 195 | void CheckMemAvailable() |
michael@0 | 196 | { |
michael@0 | 197 | if (!sHooksActive) { |
michael@0 | 198 | return; |
michael@0 | 199 | } |
michael@0 | 200 | |
michael@0 | 201 | MEMORYSTATUSEX stat; |
michael@0 | 202 | stat.dwLength = sizeof(stat); |
michael@0 | 203 | bool success = GlobalMemoryStatusEx(&stat); |
michael@0 | 204 | |
michael@0 | 205 | DEBUG_WARN_IF_FALSE(success, "GlobalMemoryStatusEx failed."); |
michael@0 | 206 | |
michael@0 | 207 | if (success) |
michael@0 | 208 | { |
michael@0 | 209 | // sLowVirtualMemoryThreshold is in MB, but ullAvailVirtual is in bytes. |
michael@0 | 210 | if (stat.ullAvailVirtual < sLowVirtualMemoryThreshold * 1024 * 1024) { |
michael@0 | 211 | // If we're running low on virtual memory, unconditionally schedule the |
michael@0 | 212 | // notification. We'll probably crash if we run out of virtual memory, |
michael@0 | 213 | // so don't worry about firing this notification too often. |
michael@0 | 214 | LOG("Detected low virtual memory."); |
michael@0 | 215 | ++sNumLowVirtualMemEvents; |
michael@0 | 216 | NS_DispatchEventualMemoryPressure(MemPressure_New); |
michael@0 | 217 | } |
michael@0 | 218 | else if (stat.ullAvailPageFile < sLowCommitSpaceThreshold * 1024 * 1024) { |
michael@0 | 219 | LOG("Detected low available page file space."); |
michael@0 | 220 | if (MaybeScheduleMemoryPressureEvent()) { |
michael@0 | 221 | ++sNumLowCommitSpaceEvents; |
michael@0 | 222 | } |
michael@0 | 223 | } |
michael@0 | 224 | else if (stat.ullAvailPhys < sLowPhysicalMemoryThreshold * 1024 * 1024) { |
michael@0 | 225 | LOG("Detected low physical memory."); |
michael@0 | 226 | if (MaybeScheduleMemoryPressureEvent()) { |
michael@0 | 227 | ++sNumLowPhysicalMemEvents; |
michael@0 | 228 | } |
michael@0 | 229 | } |
michael@0 | 230 | } |
michael@0 | 231 | } |
michael@0 | 232 | |
// Hook installed over kernel32!VirtualAlloc: forwards to the real function,
// then checks remaining memory if this call could plausibly have consumed a
// tracked resource (address space for MEM_RESERVE, physical memory for
// MEM_COMMIT).
LPVOID WINAPI
VirtualAllocHook(LPVOID aAddress, SIZE_T aSize,
                 DWORD aAllocationType,
                 DWORD aProtect)
{
  // It's tempting to see whether we have enough free virtual address space for
  // this allocation and, if we don't, synchronously fire a low-memory
  // notification to free some before we allocate.
  //
  // Unfortunately that doesn't work, principally because code doesn't expect a
  // call to malloc could trigger a GC (or call into the other routines which
  // are triggered by a low-memory notification).
  //
  // I think the best we can do here is try to allocate the memory and check
  // afterwards how much free virtual address space we have.  If we're running
  // low, we schedule a low-memory notification to run as soon as possible.

  LPVOID result = sVirtualAllocOrig(aAddress, aSize, aAllocationType, aProtect);

  // Don't call CheckMemAvailable for MEM_RESERVE if we're not tracking low
  // virtual memory.  Similarly, don't call CheckMemAvailable for MEM_COMMIT if
  // we're not tracking low physical memory.
  if ((sLowVirtualMemoryThreshold != 0 && aAllocationType & MEM_RESERVE) ||
      (sLowPhysicalMemoryThreshold != 0 && aAllocationType & MEM_COMMIT)) {
    LOG3("VirtualAllocHook(size=", aSize, ")");
    CheckMemAvailable();
  }

  return result;
}
michael@0 | 263 | |
michael@0 | 264 | LPVOID WINAPI |
michael@0 | 265 | MapViewOfFileHook(HANDLE aFileMappingObject, |
michael@0 | 266 | DWORD aDesiredAccess, |
michael@0 | 267 | DWORD aFileOffsetHigh, |
michael@0 | 268 | DWORD aFileOffsetLow, |
michael@0 | 269 | SIZE_T aNumBytesToMap) |
michael@0 | 270 | { |
michael@0 | 271 | LPVOID result = sMapViewOfFileOrig(aFileMappingObject, aDesiredAccess, |
michael@0 | 272 | aFileOffsetHigh, aFileOffsetLow, |
michael@0 | 273 | aNumBytesToMap); |
michael@0 | 274 | LOG("MapViewOfFileHook"); |
michael@0 | 275 | CheckMemAvailable(); |
michael@0 | 276 | return result; |
michael@0 | 277 | } |
michael@0 | 278 | |
michael@0 | 279 | HBITMAP WINAPI |
michael@0 | 280 | CreateDIBSectionHook(HDC aDC, |
michael@0 | 281 | const BITMAPINFO *aBitmapInfo, |
michael@0 | 282 | UINT aUsage, |
michael@0 | 283 | VOID **aBits, |
michael@0 | 284 | HANDLE aSection, |
michael@0 | 285 | DWORD aOffset) |
michael@0 | 286 | { |
michael@0 | 287 | // There are a lot of calls to CreateDIBSection, so we make some effort not |
michael@0 | 288 | // to CheckMemAvailable() for calls to CreateDIBSection which allocate only |
michael@0 | 289 | // a small amount of memory. |
michael@0 | 290 | |
michael@0 | 291 | // If aSection is non-null, CreateDIBSection won't allocate any new memory. |
michael@0 | 292 | bool doCheck = false; |
michael@0 | 293 | if (sHooksActive && !aSection && aBitmapInfo) { |
michael@0 | 294 | uint16_t bitCount = aBitmapInfo->bmiHeader.biBitCount; |
michael@0 | 295 | if (bitCount == 0) { |
michael@0 | 296 | // MSDN says bitCount == 0 means that it figures out how many bits each |
michael@0 | 297 | // pixel gets by examining the corresponding JPEG or PNG data. We'll just |
michael@0 | 298 | // assume the worst. |
michael@0 | 299 | bitCount = 32; |
michael@0 | 300 | } |
michael@0 | 301 | |
michael@0 | 302 | // |size| contains the expected allocation size in *bits*. Height may be |
michael@0 | 303 | // negative (indicating the direction the DIB is drawn in), so we take the |
michael@0 | 304 | // absolute value. |
michael@0 | 305 | int64_t size = bitCount * aBitmapInfo->bmiHeader.biWidth * |
michael@0 | 306 | aBitmapInfo->bmiHeader.biHeight; |
michael@0 | 307 | if (size < 0) |
michael@0 | 308 | size *= -1; |
michael@0 | 309 | |
michael@0 | 310 | // If we're allocating more than 1MB, check how much memory is left after |
michael@0 | 311 | // the allocation. |
michael@0 | 312 | if (size > 1024 * 1024 * 8) { |
michael@0 | 313 | LOG3("CreateDIBSectionHook: Large allocation (size=", size, ")"); |
michael@0 | 314 | doCheck = true; |
michael@0 | 315 | } |
michael@0 | 316 | } |
michael@0 | 317 | |
michael@0 | 318 | HBITMAP result = sCreateDIBSectionOrig(aDC, aBitmapInfo, aUsage, aBits, |
michael@0 | 319 | aSection, aOffset); |
michael@0 | 320 | |
michael@0 | 321 | if (doCheck) { |
michael@0 | 322 | CheckMemAvailable(); |
michael@0 | 323 | } |
michael@0 | 324 | |
michael@0 | 325 | return result; |
michael@0 | 326 | } |
michael@0 | 327 | |
// Distinguished-amount getters handed to the memory reporter machinery in
// Activate(); they expose the cumulative low-memory event counters.

static int64_t
LowMemoryEventsVirtualDistinguishedAmount()
{
  return sNumLowVirtualMemEvents;
}

static int64_t
LowMemoryEventsPhysicalDistinguishedAmount()
{
  return sNumLowPhysicalMemEvents;
}
michael@0 | 339 | |
// Memory reporter that surfaces the cumulative low-memory event counters
// under "low-memory-events/*" and "low-commit-space-events".  Registered
// from Activate().
class LowEventsReporter MOZ_FINAL : public nsIMemoryReporter
{
public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData)
  {
    nsresult rv;

    // We only do virtual-memory tracking on 32-bit builds.
    if (sizeof(void*) == 4) {
      rv = MOZ_COLLECT_REPORT(
        "low-memory-events/virtual", KIND_OTHER, UNITS_COUNT_CUMULATIVE,
        LowMemoryEventsVirtualDistinguishedAmount(),
        "Number of low-virtual-memory events fired since startup. We fire such an "
        "event if we notice there is less than memory.low_virtual_mem_threshold_mb of "
        "virtual address space available (if zero, this behavior is disabled). The "
        "process will probably crash if it runs out of virtual address space, so "
        "this event is dire.");
      NS_ENSURE_SUCCESS(rv, rv);
    }

    rv = MOZ_COLLECT_REPORT(
      "low-commit-space-events", KIND_OTHER, UNITS_COUNT_CUMULATIVE,
      sNumLowCommitSpaceEvents,
      "Number of low-commit-space events fired since startup. We fire such an "
      "event if we notice there is less than memory.low_commit_space_threshold_mb of "
      "commit space available (if zero, this behavior is disabled). Windows will "
      "likely kill the process if it runs out of commit space, so this event is "
      "dire.");
    NS_ENSURE_SUCCESS(rv, rv);

    rv = MOZ_COLLECT_REPORT(
      "low-memory-events/physical", KIND_OTHER, UNITS_COUNT_CUMULATIVE,
      LowMemoryEventsPhysicalDistinguishedAmount(),
      "Number of low-physical-memory events fired since startup. We fire such an "
      "event if we notice there is less than memory.low_physical_memory_threshold_mb "
      "of physical memory available (if zero, this behavior is disabled). The "
      "machine will start to page if it runs out of physical memory. This may "
      "cause it to run slowly, but it shouldn't cause it to crash.");
    NS_ENSURE_SUCCESS(rv, rv);

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(LowEventsReporter, nsIMemoryReporter)
michael@0 | 387 | |
michael@0 | 388 | #endif // defined(XP_WIN) |
michael@0 | 389 | |
/**
 * This runnable is executed in response to a memory-pressure event; we spin
 * the event-loop when receiving the memory-pressure event in the hope that
 * other observers will synchronously free some memory that we'll be able to
 * purge here.
 */
class nsJemallocFreeDirtyPagesRunnable MOZ_FINAL : public nsIRunnable
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSIRUNNABLE
};

NS_IMPL_ISUPPORTS(nsJemallocFreeDirtyPagesRunnable, nsIRunnable)

// Runs on the main thread (dispatched there by
// nsMemoryPressureWatcher::Observe).  A no-op when the build doesn't use
// jemalloc (MOZ_MEMORY undefined).
NS_IMETHODIMP
nsJemallocFreeDirtyPagesRunnable::Run()
{
  MOZ_ASSERT(NS_IsMainThread());

#if defined(MOZ_MEMORY)
  jemalloc_free_dirty_pages();
#endif

  return NS_OK;
}
michael@0 | 416 | |
/**
 * The memory pressure watcher is used for listening to memory-pressure events
 * and reacting upon them. We use one instance per process currently only for
 * cleaning up dirty unused pages held by jemalloc.
 */
class nsMemoryPressureWatcher MOZ_FINAL : public nsIObserver
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSIOBSERVER

  // Subscribes to "memory-pressure" and wires up the pref cache; must be
  // called after construction (see the comment on the definition).
  void Init();

private:
  // Mirrors the "memory.free_dirty_pages" pref via AddBoolVarCache.
  static bool sFreeDirtyPages;
};

NS_IMPL_ISUPPORTS(nsMemoryPressureWatcher, nsIObserver)

bool nsMemoryPressureWatcher::sFreeDirtyPages = false;
michael@0 | 437 | |
michael@0 | 438 | /** |
michael@0 | 439 | * Initialize and subscribe to the memory-pressure events. We subscribe to the |
michael@0 | 440 | * observer service in this method and not in the constructor because we need |
michael@0 | 441 | * to hold a strong reference to 'this' before calling the observer service. |
michael@0 | 442 | */ |
michael@0 | 443 | void |
michael@0 | 444 | nsMemoryPressureWatcher::Init() |
michael@0 | 445 | { |
michael@0 | 446 | nsCOMPtr<nsIObserverService> os = services::GetObserverService(); |
michael@0 | 447 | |
michael@0 | 448 | if (os) { |
michael@0 | 449 | os->AddObserver(this, "memory-pressure", /* ownsWeak */ false); |
michael@0 | 450 | } |
michael@0 | 451 | |
michael@0 | 452 | Preferences::AddBoolVarCache(&sFreeDirtyPages, "memory.free_dirty_pages", |
michael@0 | 453 | false); |
michael@0 | 454 | } |
michael@0 | 455 | |
michael@0 | 456 | /** |
michael@0 | 457 | * Reacts to all types of memory-pressure events, launches a runnable to |
michael@0 | 458 | * free dirty pages held by jemalloc. |
michael@0 | 459 | */ |
michael@0 | 460 | NS_IMETHODIMP |
michael@0 | 461 | nsMemoryPressureWatcher::Observe(nsISupports *subject, const char *topic, |
michael@0 | 462 | const char16_t *data) |
michael@0 | 463 | { |
michael@0 | 464 | MOZ_ASSERT(!strcmp(topic, "memory-pressure"), "Unknown topic"); |
michael@0 | 465 | |
michael@0 | 466 | if (sFreeDirtyPages) { |
michael@0 | 467 | nsRefPtr<nsIRunnable> runnable = new nsJemallocFreeDirtyPagesRunnable(); |
michael@0 | 468 | |
michael@0 | 469 | NS_DispatchToMainThread(runnable); |
michael@0 | 470 | } |
michael@0 | 471 | |
michael@0 | 472 | return NS_OK; |
michael@0 | 473 | } |
michael@0 | 474 | |
michael@0 | 475 | } // anonymous namespace |
michael@0 | 476 | |
michael@0 | 477 | namespace mozilla { |
michael@0 | 478 | namespace AvailableMemoryTracker { |
michael@0 | 479 | |
// Turn the tracker on: wire the thresholds to their prefs, register the
// memory reporter, and enable the hooks installed by Init().  Must be called
// after Init() (asserted below).  Also creates the per-process
// memory-pressure watcher on all platforms.
void Activate()
{
#if defined(_M_IX86) && defined(XP_WIN)
  MOZ_ASSERT(sInitialized);
  MOZ_ASSERT(!sHooksActive);

  // On 64-bit systems, hardcode sLowVirtualMemoryThreshold to 0 -- we assume
  // we're not going to run out of virtual memory!
  if (sizeof(void*) > 4) {
    sLowVirtualMemoryThreshold = 0;
  }
  else {
    Preferences::AddUintVarCache(&sLowVirtualMemoryThreshold,
                                 "memory.low_virtual_mem_threshold_mb", 128);
  }

  // Cache the remaining thresholds from their prefs (values are megabytes;
  // the last one is milliseconds).
  Preferences::AddUintVarCache(&sLowPhysicalMemoryThreshold,
                               "memory.low_physical_memory_threshold_mb", 0);
  Preferences::AddUintVarCache(&sLowCommitSpaceThreshold,
                               "memory.low_commit_space_threshold_mb", 128);
  Preferences::AddUintVarCache(&sLowMemoryNotificationIntervalMS,
                               "memory.low_memory_notification_interval_ms", 10000);

  RegisterStrongMemoryReporter(new LowEventsReporter());
  RegisterLowMemoryEventsVirtualDistinguishedAmount(LowMemoryEventsVirtualDistinguishedAmount);
  RegisterLowMemoryEventsPhysicalDistinguishedAmount(LowMemoryEventsPhysicalDistinguishedAmount);
  // From this point on the hooks installed in Init() actually do work.
  sHooksActive = true;
#endif

  // This object is held alive by the observer service.
  nsRefPtr<nsMemoryPressureWatcher> watcher = new nsMemoryPressureWatcher();
  watcher->Init();
}
michael@0 | 513 | |
// Install the VirtualAlloc/MapViewOfFile/CreateDIBSection hooks (32-bit
// Windows only).  The hooks stay dormant until Activate() sets sHooksActive.
void Init()
{
  // Do nothing on x86-64, because nsWindowsDllInterceptor is not thread-safe
  // on 64-bit.  (On 32-bit, it's probably thread-safe.)  Even if we run Init()
  // before any other of our threads are running, another process may have
  // started a remote thread which could call VirtualAlloc!
  //
  // Moreover, the benefit of this code is less clear when we're a 64-bit
  // process, because we aren't going to run out of virtual memory, and the
  // system is likely to have a fair bit of physical memory.

#if defined(_M_IX86) && defined(XP_WIN)
  // Don't register the hooks if we're a build instrumented for PGO: If we're
  // an instrumented build, the compiler adds function calls all over the place
  // which may call VirtualAlloc; this makes it hard to prevent
  // VirtualAllocHook from reentering itself.
  if (!PR_GetEnv("MOZ_PGO_INSTRUMENTED")) {
    // Each AddHook stores the original entry point into the matching
    // s*Orig pointer, which the hooks use to forward the real call.
    sKernel32Intercept.Init("Kernel32.dll");
    sKernel32Intercept.AddHook("VirtualAlloc",
                               reinterpret_cast<intptr_t>(VirtualAllocHook),
                               (void**) &sVirtualAllocOrig);
    sKernel32Intercept.AddHook("MapViewOfFile",
                               reinterpret_cast<intptr_t>(MapViewOfFileHook),
                               (void**) &sMapViewOfFileOrig);

    sGdi32Intercept.Init("Gdi32.dll");
    sGdi32Intercept.AddHook("CreateDIBSection",
                            reinterpret_cast<intptr_t>(CreateDIBSectionHook),
                            (void**) &sCreateDIBSectionOrig);
  }

  sInitialized = true;
#endif
}
michael@0 | 548 | |
michael@0 | 549 | } // namespace AvailableMemoryTracker |
michael@0 | 550 | } // namespace mozilla |