/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: sw=4 ts=4 et :
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_DeadlockDetector_h
#define mozilla_DeadlockDetector_h

#include "mozilla/Attributes.h"

#include <stdio.h>   // FILE and fputs, used by CallStack::Print
#include <stdlib.h>

#include "plhash.h"
#include "prlock.h"

#include "nsTArray.h"

#ifdef NS_TRACE_MALLOC
#  include "nsTraceMalloc.h"
#endif  // ifdef NS_TRACE_MALLOC

namespace mozilla {


// FIXME bug 456272: split this off into a convenience API on top of
// nsStackWalk?
class NS_COM_GLUE CallStack
{
private:
#ifdef NS_TRACE_MALLOC
    typedef nsTMStackTraceID callstack_id;
    // needs to be a macro to avoid disturbing the backtrace
#  define NS_GET_BACKTRACE() NS_TraceMallocGetStackTrace()
#  define NS_DEADLOCK_DETECTOR_CONSTEXPR
#else
    typedef void* callstack_id;
#  define NS_GET_BACKTRACE() 0
#  define NS_DEADLOCK_DETECTOR_CONSTEXPR MOZ_CONSTEXPR
#endif  // ifdef NS_TRACE_MALLOC

    callstack_id mCallStack;

public:
    /**
     * CallStack
     * *ALWAYS* *ALWAYS* *ALWAYS* call this with no arguments.  This
     * constructor takes an argument *ONLY* so that |NS_GET_BACKTRACE()|
     * can be evaluated in the stack frame of the caller, rather than
     * that of the constructor.
     *
     * *BEWARE*: this means that calling this constructor with no
     * arguments is not the same as a "default, do-nothing"
     * constructor: it *will* construct a backtrace.  This can cause
     * unexpected performance issues.
     */
    NS_DEADLOCK_DETECTOR_CONSTEXPR
    CallStack(const callstack_id aCallStack = NS_GET_BACKTRACE()) :
        mCallStack(aCallStack)
    {
    }
    NS_DEADLOCK_DETECTOR_CONSTEXPR
    CallStack(const CallStack& aFrom) :
        mCallStack(aFrom.mCallStack)
    {
    }
    CallStack& operator=(const CallStack& aFrom)
    {
        mCallStack = aFrom.mCallStack;
        return *this;
    }
    bool operator==(const CallStack& aOther) const
    {
        return mCallStack == aOther.mCallStack;
    }
    bool operator!=(const CallStack& aOther) const
    {
        return mCallStack != aOther.mCallStack;
    }

    // FIXME bug 456272: if this is split off,
    // NS_TraceMallocPrintStackTrace should be modified to print into
    // an nsACString
    void Print(FILE* f) const
    {
#ifdef NS_TRACE_MALLOC
        if (this != &kNone && mCallStack) {
            NS_TraceMallocPrintStackTrace(f, mCallStack);
            return;
        }
#endif
        fputs("  [stack trace unavailable]\n", f);
    }

    /** The "null" callstack. */
    static const CallStack kNone;
};
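
/*
 * Illustrative sketch (not part of this header; |MyLock| and
 * |mAcquisitionContext| are hypothetical names): because
 * |NS_GET_BACKTRACE()| is a default argument, it is expanded in the
 * frame of whoever constructs the CallStack, so a lock wrapper can
 * record its acquisition site like so:
 *
 *   void MyLock::Lock()
 *   {
 *       // the backtrace starts at MyLock::Lock and its callers,
 *       // not inside CallStack's constructor
 *       mAcquisitionContext = CallStack();
 *   }
 */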


/**
 * DeadlockDetector
 *
 * The following is an approximate description of how the deadlock detector
 * works.
 *
 * The deadlock detector ensures that all blocking resources are
 * acquired according to a partial order P.  One type of blocking
 * resource is a lock.  If a lock l1 is acquired (locked) before l2,
 * then we say that |l1 <_P l2|.  The detector flags an error if two
 * locks l1 and l2 have an inconsistent ordering in P; that is, if
 * both |l1 <_P l2| and |l2 <_P l1|.  This is a potential error
 * because a thread acquiring l1,l2 according to the first order might
 * race with a thread acquiring them according to the second order.
 * If this happens under the right conditions, then the acquisitions
 * will deadlock.
 *
 * This deadlock detector doesn't know at compile-time what P is.  So,
 * it tries to discover the order at run time.  More precisely, it
 * finds <i>some</i> order P, then tries to find chains of resource
 * acquisitions that violate P.  An example acquisition sequence, and
 * the orders it imposes, is
 *   l1.lock()   // current chain: [ l1 ]
 *               // order: { }
 *
 *   l2.lock()   // current chain: [ l1, l2 ]
 *               // order: { l1 <_P l2 }
 *
 *   l3.lock()   // current chain: [ l1, l2, l3 ]
 *               // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
 *               // (note: <_P is transitive, so also |l1 <_P l3|)
 *
 *   l2.unlock() // current chain: [ l1, l3 ]
 *               // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
 *               // (note: it's OK, but weird, that l2 was unlocked out
 *               //  of order.  we still have l1 <_P l3).
 *
 *   l2.lock()   // current chain: [ l1, l3, l2 ]
 *               // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3,
 *               //          l3 <_P l2 (!!!) }
 * BEEP BEEP!  Here the detector will flag a potential error, since
 * l2 and l3 were used inconsistently (and potentially in ways that
 * would deadlock).
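 *
 * A minimal usage sketch, under assumed names (|MyResource|,
 * |sDetector|, and the per-thread |sLastAcquired| bookkeeping are
 * hypothetical; real consumers keep their own equivalents):
 *
 *   static DeadlockDetector<MyResource> sDetector;
 *
 *   void AcquireResource(MyResource* aResource)
 *   {
 *       // aResource must already be known to the detector via
 *       // sDetector.Add() (note: the detector then owns it).
 *       CallStack here;
 *       DeadlockDetector<MyResource>::ResourceAcquisitionArray* cycle =
 *           sDetector.CheckAcquisition(sLastAcquired, aResource, here);
 *       if (cycle) {
 *           // potential deadlock; report it, then free the cycle
 *           delete cycle;
 *       }
 *       sLastAcquired = aResource;
 *       // ... actually block on the underlying resource ...
 *   }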
 */
template <typename T>
class DeadlockDetector
{
public:
    /**
     * ResourceAcquisition
     * Consists simply of a resource and the calling context from
     * which it was acquired.  We pack this information together so
     * that it can be returned back to the caller when a potential
     * deadlock has been found.
     */
    struct ResourceAcquisition
    {
        const T* mResource;
        CallStack mCallContext;

        ResourceAcquisition(
            const T* aResource,
            const CallStack aCallContext = CallStack::kNone) :
            mResource(aResource),
            mCallContext(aCallContext)
        {
        }
        ResourceAcquisition(const ResourceAcquisition& aFrom) :
            mResource(aFrom.mResource),
            mCallContext(aFrom.mCallContext)
        {
        }
        ResourceAcquisition& operator=(const ResourceAcquisition& aFrom)
        {
            mResource = aFrom.mResource;
            mCallContext = aFrom.mCallContext;
            return *this;
        }
    };
    typedef nsTArray<ResourceAcquisition> ResourceAcquisitionArray;

private:
    typedef nsTArray<PLHashEntry*> HashEntryArray;
    typedef typename HashEntryArray::index_type index_type;
    typedef typename HashEntryArray::size_type size_type;
    enum {
        NoIndex = HashEntryArray::NoIndex
    };

    /**
     * Value type for the ordering table.  Contains the other
     * resources on which an ordering constraint |key < other|
     * exists.  The catch is that we also store the calling context at
     * which the other resource was acquired; this improves the
     * quality of error messages when potential deadlock is detected.
     */
    struct OrderingEntry
    {
        OrderingEntry() :
            mFirstSeen(CallStack::kNone),
            mOrderedLT()        // FIXME bug 456272: set to empirical
        {                       // dep size?
        }
        ~OrderingEntry()
        {
        }

        CallStack mFirstSeen; // first site from which the resource appeared
        HashEntryArray mOrderedLT; // this <_o Other
    };

    static void* TableAlloc(void* /*pool*/, size_t size)
    {
        return operator new(size);
    }
    static void TableFree(void* /*pool*/, void* item)
    {
        operator delete(item);
    }
    static PLHashEntry* EntryAlloc(void* /*pool*/, const void* key)
    {
        return new PLHashEntry;
    }
    static void EntryFree(void* /*pool*/, PLHashEntry* entry, unsigned flag)
    {
        delete static_cast<T*>(const_cast<void*>(entry->key));
        delete static_cast<OrderingEntry*>(entry->value);
        entry->value = 0;
        if (HT_FREE_ENTRY == flag)
            delete entry;
    }
    static PLHashNumber HashKey(const void* aKey)
    {
        // Heap pointers are at least word aligned, so their low bits
        // are always zero; shift them out for better bucket spread.
        return NS_PTR_TO_INT32(aKey) >> 2;
    }
    static const PLHashAllocOps kAllocOps;

    // Hash table "interface" the rest of the code should use

    PLHashEntry** GetEntry(const T* aKey)
    {
        return PL_HashTableRawLookup(mOrdering, HashKey(aKey), aKey);
    }

    void PutEntry(T* aKey)
    {
        PL_HashTableAdd(mOrdering, aKey, new OrderingEntry());
    }

    // XXX need these helper methods because OrderingEntry doesn't have
    // XXX access to underlying PLHashEntry
    /**
     * Add the order |aLT <_o aGT|.
     *
     * WARNING: this does not check whether it's sane to add this
     * order.  In the "best" bad case, when this order already exists,
     * adding it anyway may unnecessarily result in O(n^2) space.  In
     * the "worst" bad case, adding it anyway will cause
     * |InTransitiveClosure()| to diverge.
     */
    void AddOrder(PLHashEntry* aLT, PLHashEntry* aGT)
    {
        static_cast<OrderingEntry*>(aLT->value)->mOrderedLT
            .InsertElementSorted(aGT);
    }

    /**
     * Return true iff the order |aFirst < aSecond| has been
     * *explicitly* added.
     *
     * Does not consider transitivity.
     */
    bool IsOrdered(const PLHashEntry* aFirst, const PLHashEntry* aSecond)
        const
    {
        return NoIndex !=
            static_cast<const OrderingEntry*>(aFirst->value)->mOrderedLT
                .BinaryIndexOf(aSecond);
    }

    /**
     * Return a pointer to the array of all elements "that" for
     * which the order |this < that| has been explicitly added.
     *
     * NOTE: this does *not* consider transitive orderings.
     */
    PLHashEntry* const* GetOrders(const PLHashEntry* aEntry) const
    {
        return static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT
            .Elements();
    }

    /**
     * Return the number of elements "that" for which the order
     * |this < that| has been explicitly added.
     *
     * NOTE: this does *not* consider transitive orderings.
     */
    size_type NumOrders(const PLHashEntry* aEntry) const
    {
        return static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT
            .Length();
    }

    /** Make a ResourceAcquisition out of |aEntry|. */
    ResourceAcquisition MakeResourceAcquisition(const PLHashEntry* aEntry)
        const
    {
        return ResourceAcquisition(
            static_cast<const T*>(aEntry->key),
            static_cast<const OrderingEntry*>(aEntry->value)->mFirstSeen);
    }

    // Throwaway RAII lock to make the following code safer.
    struct PRAutoLock
    {
        PRAutoLock(PRLock* aLock) : mLock(aLock) { PR_Lock(mLock); }
        ~PRAutoLock() { PR_Unlock(mLock); }
        PRLock* mLock;
    };

public:
    static const uint32_t kDefaultNumBuckets;

    /**
     * DeadlockDetector
     * Create a new deadlock detector.
     *
     * @param aNumResourcesGuess Guess at approximate number of resources
     *        that will be checked.
     */
    DeadlockDetector(uint32_t aNumResourcesGuess = kDefaultNumBuckets)
    {
        mOrdering = PL_NewHashTable(aNumResourcesGuess,
                                    HashKey,
                                    PL_CompareValues, PL_CompareValues,
                                    &kAllocOps, 0);
        if (!mOrdering)
            NS_RUNTIMEABORT("couldn't initialize resource ordering table");

        mLock = PR_NewLock();
        if (!mLock)
            NS_RUNTIMEABORT("couldn't allocate deadlock detector lock");
    }

    /**
     * ~DeadlockDetector
     *
     * *NOT* thread safe.
     */
    ~DeadlockDetector()
    {
        PL_HashTableDestroy(mOrdering);
        PR_DestroyLock(mLock);
    }

    /**
     * Add
     * Make the deadlock detector aware of |aResource|.
     *
     * WARNING: The deadlock detector owns |aResource|.
     *
     * Thread safe.
     *
     * @param aResource Resource to make deadlock detector aware of.
     */
    void Add(T* aResource)
    {
        PRAutoLock _(mLock);
        PutEntry(aResource);
    }

    // Nb: implementing a Remove() method makes the detector "more
    // unsound."  By removing a resource from the orderings, deadlocks
    // may be missed that would otherwise have been found.  However,
    // removing resources possibly reduces the # of false positives,
    // and additionally saves space.  So it's a trade off; we have
    // chosen to err on the side of caution and not implement Remove().

    /**
     * CheckAcquisition
     * This method is called after acquiring |aLast|, but before
     * trying to acquire |aProposed| from |aCallContext|.  It
     * determines whether actually trying to acquire |aProposed|
     * will create problems.  It is OK if |aLast| is nullptr; this is
     * interpreted as |aProposed| being the thread's first acquisition
     * of its current chain.
     *
     * Iff acquiring |aProposed| may lead to deadlock for some thread
     * interleaving (including the current one!), the cyclical
     * dependency from which this was deduced is returned.  Otherwise,
     * nullptr is returned.
     *
     * If a potential deadlock is detected and a resource cycle is
     * returned, it is the *caller's* responsibility to free it.
     *
     * Thread safe.
     *
     * @param aLast Last resource acquired by calling thread (or nullptr).
     * @param aProposed Resource calling thread proposes to acquire.
     * @param aCallContext Calling context whence acquisition request came.
     */
    ResourceAcquisitionArray* CheckAcquisition(const T* aLast,
                                               const T* aProposed,
                                               const CallStack& aCallContext)
    {
        NS_ASSERTION(aProposed, "null resource");
        PRAutoLock _(mLock);

        PLHashEntry* second = *GetEntry(aProposed);
        OrderingEntry* e = static_cast<OrderingEntry*>(second->value);
        if (CallStack::kNone == e->mFirstSeen)
            e->mFirstSeen = aCallContext;

        if (!aLast)
            // don't check if |nullptr < proposed|; just vamoose
            return nullptr;

        PLHashEntry* first = *GetEntry(aLast);

        // this is the crux of the deadlock detector algorithm

        if (first == second) {
            // reflexive deadlock.  fastpath b/c InTransitiveClosure is
            // not applicable here.
            ResourceAcquisitionArray* cycle = new ResourceAcquisitionArray();
            if (!cycle)
                NS_RUNTIMEABORT("can't allocate dep. cycle array");
            cycle->AppendElement(MakeResourceAcquisition(first));
            cycle->AppendElement(ResourceAcquisition(aProposed,
                                                     aCallContext));
            return cycle;
        }
        if (InTransitiveClosure(first, second)) {
            // we've already established |last < proposed|.  all is well.
            return nullptr;
        }
        if (InTransitiveClosure(second, first)) {
            // the order |proposed < last| has been deduced, perhaps
            // transitively.  we're attempting to violate that
            // constraint by acquiring resources in the order
            // |last < proposed|, and thus we may deadlock under the
            // right conditions.
            ResourceAcquisitionArray* cycle = GetDeductionChain(second, first);
            // show how acquiring |proposed| would complete the cycle
            cycle->AppendElement(ResourceAcquisition(aProposed,
                                                     aCallContext));
            return cycle;
        }
        // |last|, |proposed| are unordered according to our
        // poset.  this is fine, but we now need to add this
        // ordering constraint.
        AddOrder(first, second);
        return nullptr;
    }
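
    /*
     * A sketch of how a caller might consume CheckAcquisition's
     * result (illustrative only; |detector|, |last|, and |proposed|
     * are hypothetical locals, not part of this API):
     *
     *   ResourceAcquisitionArray* cycle =
     *       detector->CheckAcquisition(last, proposed, CallStack());
     *   if (cycle) {
     *       // potential deadlock: print where each link in the cycle
     *       // was first acquired, then free the array (the caller
     *       // owns it).
     *       for (uint32_t i = 0; i < cycle->Length(); ++i)
     *           (*cycle)[i].mCallContext.Print(stderr);
     *       delete cycle;
     *   }
     */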

    /**
     * Return true iff |aTarget| is in the transitive closure of |aStart|
     * over the ordering relation `<_this'.
     *
     * @precondition |aStart != aTarget|
     */
    bool InTransitiveClosure(const PLHashEntry* aStart,
                             const PLHashEntry* aTarget) const
    {
        if (IsOrdered(aStart, aTarget))
            return true;

        index_type i = 0;
        size_type len = NumOrders(aStart);
        for (const PLHashEntry* const* it = GetOrders(aStart);
             i < len; ++i, ++it)
            if (InTransitiveClosure(*it, aTarget))
                return true;
        return false;
    }

    /**
     * Return an array of all resource acquisitions
     *   aStart <_this r1 <_this r2 <_ ... <_ aTarget
     * from which |aStart <_this aTarget| was deduced, including
     * |aStart| and |aTarget|.
     *
     * Nb: there may be multiple deductions of |aStart <_this
     * aTarget|.  This function returns the first ordering found by
     * depth-first search.
     *
     * Nb: |InTransitiveClosure| could be replaced by this function.
     * However, this one is more expensive because we record the DFS
     * search stack on the heap whereas the other doesn't.
     *
     * @precondition |aStart != aTarget|
     */
    ResourceAcquisitionArray* GetDeductionChain(
        const PLHashEntry* aStart,
        const PLHashEntry* aTarget)
    {
        ResourceAcquisitionArray* chain = new ResourceAcquisitionArray();
        if (!chain)
            NS_RUNTIMEABORT("can't allocate dep. cycle array");
        chain->AppendElement(MakeResourceAcquisition(aStart));

        // Don't bury the search in NS_ASSERTION's expression: the
        // expression is compiled away in non-DEBUG builds, which would
        // leave |chain| holding only |aStart|.
        bool foundCycle = GetDeductionChain_Helper(aStart, aTarget, chain);
        NS_ASSERTION(foundCycle,
                     "GetDeductionChain called when there's no deadlock");
        (void)foundCycle;   // silence unused-variable warnings in non-DEBUG
        return chain;
    }

    // precondition: |aStart != aTarget|
    // invariant: |aStart| is the last element in |aChain|
    bool GetDeductionChain_Helper(const PLHashEntry* aStart,
                                  const PLHashEntry* aTarget,
                                  ResourceAcquisitionArray* aChain)
    {
        if (IsOrdered(aStart, aTarget)) {
            aChain->AppendElement(MakeResourceAcquisition(aTarget));
            return true;
        }

        index_type i = 0;
        size_type len = NumOrders(aStart);
        for (const PLHashEntry* const* it = GetOrders(aStart);
             i < len; ++i, ++it) {
            aChain->AppendElement(MakeResourceAcquisition(*it));
            if (GetDeductionChain_Helper(*it, aTarget, aChain))
                return true;
            aChain->RemoveElementAt(aChain->Length() - 1);
        }
        return false;
    }

    /**
     * The partial order on resource acquisitions used by the deadlock
     * detector.
     */
    PLHashTable* mOrdering;     // T* -> PLHashEntry<OrderingEntry>

    /**
     * Protects contentious methods.
     * Nb: can't use mozilla::Mutex since we are used as its deadlock
     * detector.
     */
    PRLock* mLock;

private:
    DeadlockDetector(const DeadlockDetector& aDD) MOZ_DELETE;
    DeadlockDetector& operator=(const DeadlockDetector& aDD) MOZ_DELETE;
};


template<typename T>
const PLHashAllocOps DeadlockDetector<T>::kAllocOps = {
    DeadlockDetector<T>::TableAlloc, DeadlockDetector<T>::TableFree,
    DeadlockDetector<T>::EntryAlloc, DeadlockDetector<T>::EntryFree
};


template<typename T>
// FIXME bug 456272: tune based on average workload
const uint32_t DeadlockDetector<T>::kDefaultNumBuckets = 64;


} // namespace mozilla


#endif // ifndef mozilla_DeadlockDetector_h