Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/condition_variable.h"

#include <stack>

#include "base/lock.h"
#include "base/logging.h"
#include "base/time.h"

using base::TimeDelta;

ConditionVariable::ConditionVariable(Lock* user_lock)
    : user_lock_(*user_lock),
      run_state_(RUNNING),
      allocation_counter_(0),
      recycling_list_size_(0) {
  DCHECK(user_lock);
}

ConditionVariable::~ConditionVariable() {
  AutoLock auto_lock(internal_lock_);
  run_state_ = SHUTDOWN;  // Prevent any more waiting.

  DCHECK_EQ(recycling_list_size_, allocation_counter_);
  if (recycling_list_size_ != allocation_counter_) {  // Rare shutdown problem.
    // There are threads of execution still in this->TimedWait() and yet the
    // caller has instigated the destruction of this instance :-/.
    // A common reason for such "overly hasty" destruction is that the caller
    // was not willing to wait for all the threads to terminate. Such hasty
    // actions are a violation of our usage contract, but we'll give the
    // waiting thread(s) one last chance to exit gracefully (prior to our
    // destruction).
    // Note: waiting_list_ *might* be empty, but recycling is still pending.
    AutoUnlock auto_unlock(internal_lock_);
    Broadcast();  // Make sure all waiting threads have been signaled.
    Sleep(10);  // Give threads a chance to grab internal_lock_.
    // All contained threads should be blocked on user_lock_ by now :-).
  }  // Reacquire internal_lock_.

  DCHECK_EQ(recycling_list_size_, allocation_counter_);
}

void ConditionVariable::Wait() {
  // Default to "wait forever" timing, which means we have to get a Signal()
  // or Broadcast() to come out of this wait state.
  TimedWait(TimeDelta::FromMilliseconds(INFINITE));
}

void ConditionVariable::TimedWait(const TimeDelta& max_time) {
  Event* waiting_event;
  HANDLE handle;
  {
    AutoLock auto_lock(internal_lock_);
    if (RUNNING != run_state_) return;  // Destruction in progress.
    waiting_event = GetEventForWaiting();
    handle = waiting_event->handle();
    DCHECK(handle);
  }  // Release internal_lock_.

  {
    AutoUnlock unlock(user_lock_);  // Release caller's lock.
    WaitForSingleObject(handle, static_cast<DWORD>(max_time.InMilliseconds()));
    // Minimize spurious signal creation window by recycling asap.
    AutoLock auto_lock(internal_lock_);
    RecycleEvent(waiting_event);
    // Release internal_lock_.
  }  // Reacquire caller's lock to depth at entry.
}

// Broadcast() is guaranteed to signal all threads that were waiting (i.e., had
// a cv_event internally allocated for them) before Broadcast() was called.
void ConditionVariable::Broadcast() {
  std::stack<HANDLE> handles;  // See FAQ-question-10.
  {
    AutoLock auto_lock(internal_lock_);
    if (waiting_list_.IsEmpty())
      return;
    while (!waiting_list_.IsEmpty())
      // This is not a leak from waiting_list_. See FAQ-question 12.
      handles.push(waiting_list_.PopBack()->handle());
  }  // Release internal_lock_.
  while (!handles.empty()) {
    SetEvent(handles.top());
    handles.pop();
  }
}

// Signal() will select one of the waiting threads, and signal it (signal its
// cv_event). For better performance we signal the thread that went to sleep
// most recently (LIFO). If we want fairness, then we wake the thread that has
// been sleeping the longest (FIFO).
void ConditionVariable::Signal() {
  HANDLE handle;
  {
    AutoLock auto_lock(internal_lock_);
    if (waiting_list_.IsEmpty())
      return;  // No one to signal.
    // Only the performance (LIFO) option is used here; see the comment above.
    // This is not a leak from waiting_list. See FAQ-question 12.
    handle = waiting_list_.PopBack()->handle();  // LIFO.
  }  // Release internal_lock_.
  SetEvent(handle);
}

// GetEventForWaiting() provides a unique cv_event for any caller that needs to
// wait. This means that (worst case) we may over time create as many cv_event
// objects as there are threads simultaneously using this instance's Wait()
// functionality.
ConditionVariable::Event* ConditionVariable::GetEventForWaiting() {
  // We hold internal_lock_, courtesy of Wait().
  Event* cv_event;
  if (0 == recycling_list_size_) {
    DCHECK(recycling_list_.IsEmpty());
    cv_event = new Event();
    cv_event->InitListElement();
    allocation_counter_++;
    // CHECK_NE is not defined in our codebase, so we have to use CHECK.
    CHECK(cv_event->handle());
  } else {
    cv_event = recycling_list_.PopFront();
    recycling_list_size_--;
  }
  waiting_list_.PushBack(cv_event);
  return cv_event;
}

// RecycleEvent() takes a cv_event that was previously used for Wait()ing, and
// recycles it for use in future Wait() calls for this or other threads.
// Note that there is a tiny chance that the cv_event is still signaled when we
// obtain it, and that can cause spurious signals (if/when we re-use the
// cv_event), but such is quite rare (see FAQ-question-5).
void ConditionVariable::RecycleEvent(Event* used_event) {
  // We hold internal_lock_, courtesy of Wait().
  // If the cv_event timed out, then it is necessary to remove it from
  // waiting_list_. If it was selected by Broadcast() or Signal(), then it is
  // already gone.
  used_event->Extract();  // Possibly redundant.
  recycling_list_.PushBack(used_event);
  recycling_list_size_++;
}
//------------------------------------------------------------------------------
// The next section provides the implementation for the private Event class.
//------------------------------------------------------------------------------

// Event provides a doubly-linked-list of events for use exclusively by the
// ConditionVariable class.

// This custom container was crafted because no simple combination of STL
// classes appeared to support the functionality required. The specific
// unusual requirement for a linked-list-class is support for the Extract()
// method, which can remove an element from a list, potentially for insertion
// into a second list. Most critically, the Extract() method is idempotent,
// turning the indicated element into an extracted singleton whether it was
// contained in a list or not. This functionality allows one (or more) of the
// threads to do the extraction. The iterator that identifies this extractable
// element (in this case, a pointer to the list element) can be used after
// arbitrary manipulation of the (possibly) enclosing list container. In
// general, STL containers do not provide iterators that can be used across
// modifications (insertions/extractions) of the enclosing containers, and
// certainly don't provide iterators that can be used if the identified
// element is *deleted* (removed) from the container.

// It is possible to use multiple redundant containers, such as an STL list,
// and an STL map, to achieve similar container semantics. This container has
// only O(1) methods, while the corresponding (multiple) STL container approach
// would have more complex O(log(N)) methods (yeah... N isn't that large).
// Multiple containers also make correctness more difficult to assert, as
// data is redundantly stored and maintained, which is generally evil.

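
// For illustration only (a hypothetical usage sketch, not code that runs in
// this file): the idempotence of Extract() means both calls below are safe,
// whether or not the element is still linked into a list:
//
//   Event list;                  // handle_ == 0, so it acts as a list holder.
//   Event* item = new Event();
//   item->InitListElement();     // Becomes a real element with a Win32 event.
//   list.PushBack(item);
//   item->Extract();             // Unlinks item from list.
//   item->Extract();             // No-op: item is already a singleton.
//   delete item;
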
ConditionVariable::Event::Event() : handle_(0) {
  next_ = prev_ = this;  // Self referencing circular.
}

ConditionVariable::Event::~Event() {
  if (0 == handle_) {
    // This is the list holder.
    while (!IsEmpty()) {
      Event* cv_event = PopFront();
      DCHECK(cv_event->ValidateAsItem());
      delete cv_event;
    }
  }
  DCHECK(IsSingleton());
  if (0 != handle_) {
    int ret_val = CloseHandle(handle_);
    DCHECK(ret_val);
  }
}

// Change a container instance permanently into an element of a list.
void ConditionVariable::Event::InitListElement() {
  DCHECK(!handle_);
  handle_ = CreateEvent(NULL, false, false, NULL);
  CHECK(handle_);
}

// Methods for use on lists.
bool ConditionVariable::Event::IsEmpty() const {
  DCHECK(ValidateAsList());
  return IsSingleton();
}

void ConditionVariable::Event::PushBack(Event* other) {
  DCHECK(ValidateAsList());
  DCHECK(other->ValidateAsItem());
  DCHECK(other->IsSingleton());
  // Prepare other for insertion.
  other->prev_ = prev_;
  other->next_ = this;
  // Cut into list.
  prev_->next_ = other;
  prev_ = other;
  DCHECK(ValidateAsDistinct(other));
}

ConditionVariable::Event* ConditionVariable::Event::PopFront() {
  DCHECK(ValidateAsList());
  DCHECK(!IsSingleton());
  return next_->Extract();
}

ConditionVariable::Event* ConditionVariable::Event::PopBack() {
  DCHECK(ValidateAsList());
  DCHECK(!IsSingleton());
  return prev_->Extract();
}

// Methods for use on list elements.
// Accessor method.
HANDLE ConditionVariable::Event::handle() const {
  DCHECK(ValidateAsItem());
  return handle_;
}

// Pull an element from a list (if it's in one).
ConditionVariable::Event* ConditionVariable::Event::Extract() {
  DCHECK(ValidateAsItem());
  if (!IsSingleton()) {
    // Stitch neighbors together.
    next_->prev_ = prev_;
    prev_->next_ = next_;
    // Make extractee into a singleton.
    prev_ = next_ = this;
  }
  DCHECK(IsSingleton());
  return this;
}

// Method for use on a list element or on a list.
bool ConditionVariable::Event::IsSingleton() const {
  DCHECK(ValidateLinks());
  return next_ == this;
}

// Provide pre/post conditions to validate correct manipulations.
bool ConditionVariable::Event::ValidateAsDistinct(Event* other) const {
  return ValidateLinks() && other->ValidateLinks() && (this != other);
}

bool ConditionVariable::Event::ValidateAsItem() const {
  return (0 != handle_) && ValidateLinks();
}

bool ConditionVariable::Event::ValidateAsList() const {
  return (0 == handle_) && ValidateLinks();
}

bool ConditionVariable::Event::ValidateLinks() const {
  // Make sure both of our neighbors have links that point back to us.
  // We don't do the O(n) check that traverses the whole loop; instead we only
  // do a local check on (and return based on) our immediate neighbors.
  return (next_->prev_ == this) && (prev_->next_ == this);
}


/*
FAQ on subtle implementation details:

1) What makes this problem subtle? Please take a look at "Strategies
for Implementing POSIX Condition Variables on Win32" by Douglas
C. Schmidt and Irfan Pyarali.
http://www.cs.wustl.edu/~schmidt/win32-cv-1.html It includes
discussions of numerous flawed strategies for implementing this
functionality. I'm not convinced that even the final proposed
implementation has semantics that are as nice as this implementation
(especially with regard to Broadcast() and the impact on threads that
try to Wait() after a Broadcast() has been called, but before all the
original waiting threads have been signaled).

2) Why can't you use a single wait_event for all threads that call
Wait()? See FAQ-question-1, or consider the following: If a single
event were used, then numerous threads calling Wait() could release
their cs locks, and be preempted just before calling
WaitForSingleObject(). If a call to Broadcast() was then presented on
a second thread, it would be impossible to actually signal all
waiting(?) threads. Some number of SetEvent() calls *could* be made,
but there could be no guarantee that those led to more than one
signaled thread (SetEvent()'s may be discarded after the first!), and
there could be no guarantee that the SetEvent() calls didn't just
awaken "other" threads that hadn't even started waiting yet (oops).
Without any limit on the number of requisite SetEvent() calls, the
system would be forced to do many such calls, allowing many new waits
to receive spurious signals.
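
To make that failure mode concrete, here is a deliberately broken sketch of
the single-event strategy (invented names, not code from this project). The
hypothetical broadcast has no way to know whether its SetEvent() calls reach
threads that were already waiting or threads that show up later:

    // Hypothetical, broken: one auto-reset event shared by every waiter.
    HANDLE shared_event = CreateEvent(NULL, false, false, NULL);

    void NaiveWait(Lock* lock) {
      lock->Release();
      // A thread preempted right here has not yet registered anywhere, so a
      // broadcast cannot tell whether its SetEvent() will reach a real waiter
      // or be consumed by a thread that only starts waiting later.
      WaitForSingleObject(shared_event, INFINITE);
      lock->Acquire();
    }

    void NaiveBroadcast(int assumed_waiter_count) {
      for (int i = 0; i < assumed_waiter_count; ++i)
        SetEvent(shared_event);  // Auto-reset: repeated SetEvent() calls while
                                 // the event is already signaled are lost.
    }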

3) How does this implementation cause spurious signal events? The
cause in this implementation involves a race between a signal via
time-out and a signal via Signal() or Broadcast(). The series of
actions leading to this is:

a) Timer fires, and a waiting thread exits the line of code:

    WaitForSingleObject(waiting_event, max_time.InMilliseconds());

b) That thread (in (a)) is randomly pre-empted after the above line,
leaving the waiting_event reset (unsignaled) and still in the
waiting_list_.

c) A call to Signal() (or Broadcast()) on a second thread proceeds, and
selects the waiting cv_event (identified in step (b)) as the event to revive
via a call to SetEvent().

d) The Signal() method (step c) calls SetEvent() on waiting_event (step b).

e) The waiting cv_event (step b) is now signaled, but no thread is
waiting on it.

f) When that waiting_event (step b) is reused, it will immediately
be signaled (spuriously).


4) Why do you recycle events, and cause spurious signals? First off,
the spurious events are very rare. They can only (I think) appear
when the race described in FAQ-question-3 takes place. This should be
very rare. Most(?) uses will involve only timer expiration, or only
Signal/Broadcast() actions. When both are used, it will be rare that
the race will appear, and it would require MANY Wait() and signaling
activities. If this implementation did not recycle events, then it
would have to create and destroy events for every call to Wait().
That allocation/deallocation and associated construction/destruction
would be costly (per wait), and would only be a rare benefit (when the
race was "lost" and a spurious signal took place). That would be a bad
(IMO) optimization trade-off. Finally, such spurious events are
allowed by the specification of condition variables (such as
implemented in Vista), and hence it is better if any user accommodates
such spurious events (see usage note in condition_variable.h).
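
As an illustration of what "accommodating" spurious events looks like, a
minimal consumer can simply re-check its predicate in a loop around Wait().
The class and member names below are invented for the example; only Lock,
AutoLock and ConditionVariable come from this code base:

    #include <queue>

    #include "base/condition_variable.h"
    #include "base/lock.h"

    class WorkQueue {
     public:
      WorkQueue() : cv_(&lock_) {}

      void Push(int item) {
        AutoLock auto_lock(lock_);
        items_.push(item);
        cv_.Signal();  // Wake (at most) one waiting consumer.
      }

      int Pop() {
        AutoLock auto_lock(lock_);
        // Loop: a wakeup may be spurious (see FAQ-question-3/4), or another
        // consumer may have drained the queue first, so re-check the predicate.
        while (items_.empty())
          cv_.Wait();
        int item = items_.front();
        items_.pop();
        return item;
      }

     private:
      Lock lock_;
      ConditionVariable cv_;
      std::queue<int> items_;
    };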

5) Why don't you reset events when you are about to recycle them, or
about to reuse them, so that the spurious signals don't take place?
The thread described in FAQ-question-3 step c may be pre-empted for an
arbitrary length of time before proceeding to step d. As a result,
the wait_event may actually be re-used *before* step (e) is reached.
Hence, calling reset would not help significantly.

6) How is it that the caller's lock is released atomically with the
entry into a wait state? We commit to the wait activity when we
allocate the wait_event for use in a given call to Wait(). This
allocation takes place before the caller's lock is released (and
actually before our internal_lock_ is released). That allocation is
the defining moment when "the wait state has been entered," as that
thread *can* now be signaled by a call to Broadcast() or Signal().
Hence we actually "commit to wait" before releasing the lock, making
the pair effectively atomic.
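
In code terms, the ordering inside TimedWait() above is (a condensed
paraphrase of the existing code, not an additional API):

    {
      AutoLock auto_lock(internal_lock_);    // 1. Take the internal lock.
      waiting_event = GetEventForWaiting();  // 2. Commit point: Signal() and
                                             //    Broadcast() can now see us.
    }                                        // 3. Release internal_lock_.
    {
      AutoUnlock unlock(user_lock_);         // 4. Only now release the caller's lock.
      WaitForSingleObject(handle, ...);      // 5. Sleep until signaled or timed out.
    }                                        // 6. Reacquire the caller's lock.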

8) Why do you need to lock your data structures during waiting, as the
caller is already in possession of a lock? We need to Acquire() and
Release() our internal lock during Signal() and Broadcast(). If we tried
to use a caller's lock for this purpose, we might conflict with their
external use of the lock. For example, the caller may want to consistently
hold a lock on one thread while calling Signal() on another, and that would
block Signal().

9) Couldn't a more efficient implementation be provided if you
preclude using more than one external lock in conjunction with a
single ConditionVariable instance? Yes, at least it could be viewed
as a simpler API (since you don't have to reiterate the lock argument
in each Wait() call). One of the constructors now takes a specific
lock as an argument, and there are corresponding Wait() calls that
don't specify a lock now. It turns out that the resulting implementation
can't be made more efficient, as the internal lock needs to be used by
Signal() and Broadcast(), to access internal data structures. As a
result, I was not able to utilize the user supplied lock (which is
being used by the user elsewhere presumably) to protect the private
member access.

9) Since you have a second lock, how can we be sure that there is no
possible deadlock scenario? Our internal_lock_ is always the last
lock acquired, and the first one released, and hence a deadlock (due
to critical section problems) is impossible as a consequence of our
lock.

10) When doing a Broadcast(), why did you copy all the events into
an STL stack, rather than making a linked-loop, and iterating over it?
The iterating during Broadcast() is done outside the protection
of the internal lock. As a result, other threads, such as the thread
wherein a related event is waiting, could asynchronously manipulate
the links around a cv_event. As a result, the link structure cannot
be used outside a lock. Broadcast() could iterate over waiting
events by cycling in-and-out of the protection of the internal_lock,
but that appears more expensive than copying the list into an STL
stack.

11) Why did the lock.h file need to be modified so much for this
change? Central to a Condition Variable is the atomic release of a
lock during a Wait(). This places Wait() functionality exactly
mid-way between the two classes, Lock and Condition Variable. Given
that there can be nested Acquire()'s of locks, and Wait() had to
Release() completely a held lock, it was necessary to augment the Lock
class with a recursion counter. Even more subtle is the fact that the
recursion counter (in a Lock) must be protected, as many threads can
access it asynchronously. As a positive fallout of this, there are
now some DCHECKS to be sure no one Release()s a Lock more than they
Acquire()ed it, and there is ifdef'ed functionality that can detect
nested locks (legal under Windows, but not under POSIX).
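
For readers who don't have lock.h at hand, the idea is roughly the following
(a sketch with invented names; the real Lock class in base/lock.h differs in
detail):

    #include <windows.h>

    #include "base/logging.h"

    // Illustrative only: a critical section wrapper that tracks recursion
    // depth so a condition variable can fully release a recursively held lock.
    class RecursionCountedLockSketch {
     public:
      RecursionCountedLockSketch() : recursion_count_(0) {
        ::InitializeCriticalSection(&cs_);
      }
      ~RecursionCountedLockSketch() { ::DeleteCriticalSection(&cs_); }

      void Acquire() {
        ::EnterCriticalSection(&cs_);  // Recursive on Windows.
        ++recursion_count_;            // Safe to touch: we hold cs_ here.
      }
      void Release() {
        // Catch anyone who Release()s more often than they Acquire()d.
        DCHECK(recursion_count_ > 0);
        --recursion_count_;
        ::LeaveCriticalSection(&cs_);
      }

      // What Wait() needs: drop the lock completely, remembering the depth...
      int FullRelease() {
        int depth = recursion_count_;
        for (int i = 0; i < depth; ++i)
          Release();
        return depth;
      }
      // ...and restore that depth on the way out of the wait.
      void Reacquire(int depth) {
        for (int i = 0; i < depth; ++i)
          Acquire();
      }

     private:
      CRITICAL_SECTION cs_;
      int recursion_count_;
    };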

12) Why is it that the cv_events removed from list in Broadcast() and Signal()
are not leaked? How are they recovered? The cv_events that appear to leak are
taken from the waiting_list_. For each element in that list, there is currently
a thread in or around the WaitForSingleObject() call of Wait(), and those
threads have references to these otherwise leaked events. They are passed as
arguments to be recycled just after returning from WaitForSingleObject().

13) Why did you use a custom container class (the linked list), when STL has
perfectly good containers, such as an STL list? The STL list, as with any
container, does not guarantee the utility of an iterator across manipulation
(such as insertions and deletions) of the underlying container. The custom
doubly-linked-list container provided that assurance. I don't believe any
combination of STL containers provided the services that were needed at the same
O(1) efficiency as the custom linked list. The unusual requirement
for the container class is that a reference to an item within a container (an
iterator) needed to be maintained across an arbitrary manipulation of the
container. This requirement exposes itself in the Wait() method, where a
waiting_event must be selected prior to the WaitForSingleObject(), and then it
must be used as part of recycling to remove the related instance from the
waiting_list. A hash table (STL map) could be used, but I was embarrassed to
use a complex and relatively low efficiency container when a doubly linked list
provided O(1) performance in all required operations. Since other operations
to provide performance-and/or-fairness required queue (FIFO) and list (LIFO)
containers, I would also have needed to use an STL list/queue as well as an STL
map. In the end I decided it would be "fun" to just do it right, and I
put so many assertions (DCHECKs) into the container class that it is trivial to
code review and validate its correctness.

*/