/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef PROFILER_PSEUDO_STACK_H_
#define PROFILER_PSEUDO_STACK_H_

#include "mozilla/ArrayUtils.h"
#include "mozilla/NullPtr.h"
#include <stdint.h>
#include "js/ProfilingStack.h"
#include <stdlib.h>
#include "mozilla/Atomics.h"

/* we duplicate this code here to avoid header dependencies
 * which make it more difficult to include in other places */
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#define V8_HOST_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#else
#warning Please add support for your architecture in chromium_types.h
#endif

// STORE_SEQUENCER: Because signals can interrupt our profile modification,
//                  we need to make sure stores are not re-ordered by the
//                  compiler or hardware, so that the profile is consistent at
//                  every point at which the signal can fire.
#ifdef V8_HOST_ARCH_ARM
// TODO: Is there something cheaper that will prevent memory stores from being
//       reordered?

typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;

# define STORE_SEQUENCER() pLinuxKernelMemoryBarrier()
#elif defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
# if defined(_MSC_VER)
#if _MSC_VER > 1400
# include <intrin.h>
#else // _MSC_VER > 1400
// MSVC2005 has a name collision bug caused when both <intrin.h> and <winnt.h> are included together.
#ifdef _WINNT_
# define _interlockedbittestandreset _interlockedbittestandreset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# define _interlockedbittestandset _interlockedbittestandset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# include <intrin.h>
#else
# include <intrin.h>
# define _interlockedbittestandreset _interlockedbittestandreset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# define _interlockedbittestandset _interlockedbittestandset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
#endif
// Even though MSVC2005 has the intrinsic _ReadWriteBarrier, it fails to link to it when it's
// not explicitly declared.
# pragma intrinsic(_ReadWriteBarrier)
#endif // _MSC_VER > 1400
# define STORE_SEQUENCER() _ReadWriteBarrier();
# elif defined(__INTEL_COMPILER)
# define STORE_SEQUENCER() __memory_barrier();
# elif __GNUC__
# define STORE_SEQUENCER() asm volatile("" ::: "memory");
# else
# error "Memory clobber not supported for your compiler."
# endif
#else
# error "Memory clobber not supported for your platform."
#endif

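// Illustrative sketch (not part of the original header): the intended use of
// STORE_SEQUENCER() is to publish data before the flag or index that a signal
// handler inspects, e.g.:
//
//   entry.setLabel(name);   // write the payload first
//   STORE_SEQUENCER();      // keep the compiler/hardware from reordering the stores
//   mStackPointer++;        // only now make the entry visible to the signal handler
//
// PseudoStack::push() below follows exactly this pattern.
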
// We can't include <algorithm> because it causes issues on OS X, so we use
// our own min function.
static inline uint32_t sMin(uint32_t l, uint32_t r) {
  return l < r ? l : r;
}

// A stack entry exists to allow the JS engine to inform SPS of the current
// backtrace, but also to instrument particular points in C++ in case stack
// walking is not available on the platform we are running on.
//
// Each entry has a descriptive string, a relevant stack address, and some extra
// information the JS engine might want to inform SPS of. This class inherits
// from the JS engine's version of the entry to ensure that the size and layout
// of the two representations are consistent.
class StackEntry : public js::ProfileEntry
{
public:

  bool isCopyLabel() const volatile {
    return !((uintptr_t)stackAddress() & 0x1);
  }

  void setStackAddressCopy(void *sparg, bool copy) volatile {
    // Tagged pointer. The least significant bit is used to track whether
    // mLabel needs a copy. Note that we don't need the last bit of the stack
    // address for proper ordering. This is optimized for encoding within the
    // JS engine's instrumentation, so we do the extra work here of encoding a
    // bit. Last bit 1 = don't copy, last bit 0 = copy.
    if (copy) {
      setStackAddress(reinterpret_cast<void*>(
        reinterpret_cast<uintptr_t>(sparg) & ~NoCopyBit));
    } else {
      setStackAddress(reinterpret_cast<void*>(
        reinterpret_cast<uintptr_t>(sparg) | NoCopyBit));
    }
  }
};
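
// Illustrative sketch (assuming NoCopyBit is the low bit, 0x1, as the mask in
// isCopyLabel() suggests): stack addresses are at least 2-byte aligned, so the
// low bit is free to carry the copy flag. A hypothetical caller might write:
//
//   StackEntry e;
//   e.setStackAddressCopy(&someLocal, /* copy = */ true);
//   bool needsCopy = e.isCopyLabel();   // true: the low bit was left clear
//
// where 'someLocal' stands in for the relevant stack slot.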

class ProfilerMarkerPayload;
template<typename T>
class ProfilerLinkedList;
class JSStreamWriter;
class JSCustomArray;
class ThreadProfile;
class ProfilerMarker {
  friend class ProfilerLinkedList<ProfilerMarker>;
public:
  ProfilerMarker(const char* aMarkerName,
                 ProfilerMarkerPayload* aPayload = nullptr,
                 float aTime = 0);

  ~ProfilerMarker();

  const char* GetMarkerName() const {
    return mMarkerName;
  }

  void
  StreamJSObject(JSStreamWriter& b) const;

  void SetGeneration(int aGenID);

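  // A marker tagged with generation g is treated as expired once the current
  // generation reaches g + 2, leaving one full generation of grace before the
  // marker may be deleted.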
  bool HasExpired(int aGenID) const {
    return mGenID + 2 <= aGenID;
  }

  float GetTime();

private:
  char* mMarkerName;
  ProfilerMarkerPayload* mPayload;
  ProfilerMarker* mNext;
  float mTime;
  int mGenID;
};

// Forward declaration
typedef struct _UnwinderThreadBuffer UnwinderThreadBuffer;

/**
 * This struct is used to add a mNext field to UnwinderThreadBuffer objects for
 * use with ProfilerLinkedList. It is done this way so that UnwinderThreadBuffer
 * may continue to be opaque with respect to code outside of UnwinderThread2.cpp
 */
struct LinkedUWTBuffer
{
  LinkedUWTBuffer()
    : mNext(nullptr)
  {}
  virtual ~LinkedUWTBuffer() {}
  virtual UnwinderThreadBuffer* GetBuffer() = 0;
  LinkedUWTBuffer* mNext;
};

template<typename T>
class ProfilerLinkedList {
public:
  ProfilerLinkedList()
    : mHead(nullptr)
    , mTail(nullptr)
  {}

  void insert(T* elem)
  {
    if (!mTail) {
      mHead = elem;
      mTail = elem;
    } else {
      mTail->mNext = elem;
      mTail = elem;
    }
    elem->mNext = nullptr;
  }

  T* popHead()
  {
    if (!mHead) {
      MOZ_ASSERT(false);
      return nullptr;
    }

    T* head = mHead;

    mHead = head->mNext;
    if (!mHead) {
      mTail = nullptr;
    }

    return head;
  }

  const T* peek() {
    return mHead;
  }

private:
  T* mHead;
  T* mTail;
};

typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
typedef ProfilerLinkedList<LinkedUWTBuffer> UWTBufferLinkedList;

class PendingMarkers {
public:
  PendingMarkers()
    : mSignalLock(false)
  {}

  ~PendingMarkers();

  void addMarker(ProfilerMarker *aMarker);

  void updateGeneration(int aGenID);

  /**
   * Track a marker which has been inserted into the ThreadProfile.
   * This marker can safely be deleted once the generation has
   * expired.
   */
  void addStoredMarker(ProfilerMarker *aStoredMarker);

  // Called from the signal handler; this function must be reentrant.
  ProfilerMarkerLinkedList* getPendingMarkers()
  {
    // If mSignalLock is set, the list is inconsistent because it is being
    // modified by the profiled thread, so postpone these markers until the
    // next sample. A livelock is virtually impossible and would show up in a
    // profile as many samples in 'addMarker', so we ignore this scenario.
    if (mSignalLock) {
      return nullptr;
    }
    return &mPendingMarkers;
  }

  void clearMarkers()
  {
    while (mPendingMarkers.peek()) {
      delete mPendingMarkers.popHead();
    }
    while (mStoredMarkers.peek()) {
      delete mStoredMarkers.popHead();
    }
  }

private:
  // Keep a list of active markers to be applied to the next sample taken
  ProfilerMarkerLinkedList mPendingMarkers;
  ProfilerMarkerLinkedList mStoredMarkers;
  // If this is set then it's not safe to read the pending marker list from
  // the signal handler
  volatile bool mSignalLock;
  // We don't want to modify the marker lists from within the signal handler,
  // so we allow it to queue a clear operation.
  volatile mozilla::sig_safe_t mGenID;
};
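
// Illustrative sketch (hypothetical sampler code, not part of this header):
// the signal-side consumer is expected to tolerate getPendingMarkers()
// returning nullptr and simply retry on the next sample, e.g.:
//
//   if (ProfilerMarkerLinkedList* markers = pendingMarkers.getPendingMarkers()) {
//     while (ProfilerMarker* m = markers->popHead()) {
//       /* stream m into the profile, then hand it to addStoredMarker() */
//     }
//   }
//   // nullptr means the profiled thread held mSignalLock; skip this sample.
//
// Here 'pendingMarkers' is a stand-in for the PendingMarkers instance owned by
// the profiled thread's PseudoStack.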

class PendingUWTBuffers
{
public:
  PendingUWTBuffers()
    : mSignalLock(false)
  {
  }

  void addLinkedUWTBuffer(LinkedUWTBuffer* aBuff)
  {
    MOZ_ASSERT(aBuff);
    mSignalLock = true;
    STORE_SEQUENCER();
    mPendingUWTBuffers.insert(aBuff);
    STORE_SEQUENCER();
    mSignalLock = false;
  }

  // Called from the signal handler; this function must be reentrant.
  UWTBufferLinkedList* getLinkedUWTBuffers()
  {
    if (mSignalLock) {
      return nullptr;
    }
    return &mPendingUWTBuffers;
  }

private:
  UWTBufferLinkedList mPendingUWTBuffers;
  volatile bool mSignalLock;
};

// Stub eventMarker function for js-engine event generation.
void ProfilerJSEventMarker(const char *event);

// The PseudoStack members are read by signal handlers, so any mutation of
// them must be signal-safe.
struct PseudoStack
{
public:
  PseudoStack()
    : mStackPointer(0)
    , mSleepId(0)
    , mSleepIdObserved(0)
    , mSleeping(false)
    , mRuntime(nullptr)
    , mStartJSSampling(false)
    , mPrivacyMode(false)
  { }

  ~PseudoStack() {
    if (mStackPointer != 0) {
      // We're releasing the pseudostack while it's still in use.
      // The label macros keep a non-ref-counted reference to the
      // stack to avoid a TLS lookup. If these are not all cleared
      // we will get a use-after-free, so it is better to crash now.
      abort();
    }
  }

  // This is called on every profiler restart. Put things that should happen
  // at that time here.
  void reinitializeOnResume() {
    // This is needed to cause an initial sample to be taken from sleeping
    // threads. Otherwise sleeping threads would not have any samples to copy
    // forward while sleeping.
    mSleepId++;
  }

  void addLinkedUWTBuffer(LinkedUWTBuffer* aBuff)
  {
    mPendingUWTBuffers.addLinkedUWTBuffer(aBuff);
  }

  UWTBufferLinkedList* getLinkedUWTBuffers()
  {
    return mPendingUWTBuffers.getLinkedUWTBuffers();
  }

  void addMarker(const char *aMarkerStr, ProfilerMarkerPayload *aPayload, float aTime)
  {
    ProfilerMarker* marker = new ProfilerMarker(aMarkerStr, aPayload, aTime);
    mPendingMarkers.addMarker(marker);
  }

  void addStoredMarker(ProfilerMarker *aStoredMarker) {
    mPendingMarkers.addStoredMarker(aStoredMarker);
  }

  void updateGeneration(int aGenID) {
    mPendingMarkers.updateGeneration(aGenID);
  }

  // Called from the signal handler; this function must be reentrant.
  ProfilerMarkerLinkedList* getPendingMarkers()
  {
    return mPendingMarkers.getPendingMarkers();
  }

  void push(const char *aName, uint32_t line)
  {
    push(aName, nullptr, false, line);
  }

  void push(const char *aName, void *aStackAddress, bool aCopy, uint32_t line)
  {
    if (size_t(mStackPointer) >= mozilla::ArrayLength(mStack)) {
      mStackPointer++;
      return;
    }

    // Make sure we increment the pointer after the name has
    // been written such that mStack is always consistent.
    mStack[mStackPointer].setLabel(aName);
    mStack[mStackPointer].setStackAddressCopy(aStackAddress, aCopy);
    mStack[mStackPointer].setLine(line);

    // Prevent the optimizer from re-ordering these instructions
    STORE_SEQUENCER();
    mStackPointer++;
  }
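
  // Illustrative sketch (hypothetical caller, not part of this header): the
  // label macros are expected to pair push() and pop() around the annotated
  // scope, roughly:
  //
  //   stack->push("MyComponent::DoWork", __LINE__);
  //   /* ... work being profiled ... */
  //   stack->pop();
  //
  // Note that a push() beyond the capacity of mStack only bumps mStackPointer
  // and records nothing; stackSize() below clamps the count accordingly, so
  // overflowed frames are simply dropped from the profile.
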
  void pop()
  {
    mStackPointer--;
  }
  bool isEmpty()
  {
    return mStackPointer == 0;
  }
  uint32_t stackSize() const
  {
    return sMin(mStackPointer, mozilla::sig_safe_t(mozilla::ArrayLength(mStack)));
  }

  void sampleRuntime(JSRuntime *runtime) {
    mRuntime = runtime;
    if (!runtime) {
      // JS shut down
      return;
    }

    static_assert(sizeof(mStack[0]) == sizeof(js::ProfileEntry),
                  "mStack must be binary compatible with js::ProfileEntry.");
    js::SetRuntimeProfilingStack(runtime,
                                 (js::ProfileEntry*) mStack,
                                 (uint32_t*) &mStackPointer,
                                 uint32_t(mozilla::ArrayLength(mStack)));
    if (mStartJSSampling)
      enableJSSampling();
  }
  void enableJSSampling() {
    if (mRuntime) {
      js::EnableRuntimeProfilingStack(mRuntime, true);
      js::RegisterRuntimeProfilingEventMarker(mRuntime, &ProfilerJSEventMarker);
      mStartJSSampling = false;
    } else {
      mStartJSSampling = true;
    }
  }
  void jsOperationCallback() {
    if (mStartJSSampling)
      enableJSSampling();
  }
  void disableJSSampling() {
    mStartJSSampling = false;
    if (mRuntime)
      js::EnableRuntimeProfilingStack(mRuntime, false);
  }

  // Keep a list of active checkpoints
  StackEntry volatile mStack[1024];
private:
  // Keep a list of pending markers that must be moved
  // to the circular buffer
  PendingMarkers mPendingMarkers;
  // List of LinkedUWTBuffers that must be processed on the next tick
  PendingUWTBuffers mPendingUWTBuffers;
  // This may exceed the length of mStack, so instead use the stackSize() method
  // to determine the number of valid samples in mStack
  mozilla::sig_safe_t mStackPointer;
  // Incremented at every sleep/wake up of the thread
  int mSleepId;
  // Previous id observed. If this is not the same as mSleepId, this thread is
  // no longer sleeping in the same place.
  mozilla::Atomic<int> mSleepIdObserved;
  // Keeps track of whether the thread is sleeping or not (1 when sleeping,
  // 0 when awake)
  mozilla::Atomic<int> mSleeping;
public:
  // The runtime which is being sampled
  JSRuntime *mRuntime;
  // Start JS Profiling when possible
  bool mStartJSSampling;
  bool mPrivacyMode;

  enum SleepState {NOT_SLEEPING, SLEEPING_FIRST, SLEEPING_AGAIN};

  // The first time this is called during a sleep cycle we return
  // SLEEPING_FIRST; any subsequent call within the same sleep cycle returns
  // SLEEPING_AGAIN.
  SleepState observeSleeping() {
    if (mSleeping != 0) {
      if (mSleepIdObserved == mSleepId) {
        return SLEEPING_AGAIN;
      } else {
        mSleepIdObserved = mSleepId;
        return SLEEPING_FIRST;
      }
    } else {
      return NOT_SLEEPING;
    }
  }
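
  // Illustrative sketch (hypothetical sampler code, not part of this header):
  // a sampler can use the tri-state result to avoid re-walking a sleeping
  // thread's stack, e.g.:
  //
  //   switch (stack->observeSleeping()) {
  //     case PseudoStack::NOT_SLEEPING:   /* take a fresh sample */           break;
  //     case PseudoStack::SLEEPING_FIRST: /* sample once, keep it to reuse */ break;
  //     case PseudoStack::SLEEPING_AGAIN: /* copy the previous sample */      break;
  //   }
  //
  // reinitializeOnResume() bumps mSleepId so that the first sample after a
  // profiler restart is treated as SLEEPING_FIRST again.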


  // Call this whenever the current thread sleeps or wakes up
  // Calling setSleeping with the same value twice in a row is an error
  void setSleeping(int sleeping) {
    MOZ_ASSERT(mSleeping != sleeping);
    mSleepId++;
    mSleeping = sleeping;
  }
};

#endif