ipc/chromium/src/base/waitable_event_posix.cc

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned from upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/waitable_event.h"

#include "base/condition_variable.h"
#include "base/lock.h"
#include "base/message_loop.h"

// -----------------------------------------------------------------------------
// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
// support cross-process events (where one process can signal an event which
// others are waiting on). Because of this, we can avoid having one thread per
// listener in several cases.
//
// The WaitableEvent maintains a list of waiters, protected by a lock. Each
// waiter is either an async wait, in which case we have a Task and the
// MessageLoop to run it on, or a blocking wait, in which case we have the
// condition variable to signal.
//
// Waiting involves grabbing the lock and adding oneself to the wait list. Async
// waits can be canceled, which means grabbing the lock and removing oneself
// from the list.
//
// Waiting on multiple events is handled by adding a single, synchronous wait to
// the wait-list of many events. An event passes a pointer to itself when
// firing a waiter and so we can store that pointer to find out which event
// triggered.
// -----------------------------------------------------------------------------

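// -----------------------------------------------------------------------------
// Illustration (a minimal sketch, not part of the upstream Chromium source):
// the basic single-event pattern this wait-list supports. One thread blocks in
// Wait(), which puts a synchronous waiter on the wait-list; another thread
// calls Signal() to wake it. Thread creation is elided and the function names
// are hypothetical.
// -----------------------------------------------------------------------------
#if 0  // illustration only
static void ExampleWaitingThread(base::WaitableEvent* handoff) {
  // Enqueues a synchronous waiter and blocks until some thread signals.
  handoff->Wait();
}

static void ExampleSignalingThread(base::WaitableEvent* handoff) {
  // Wakes one waiter (auto-reset event) or all waiters (manual-reset event).
  handoff->Signal();
}
#endif
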
namespace base {

// -----------------------------------------------------------------------------
// WaitableEvent::Waiter (declared in the header) is just an abstract base
// class for waking the two types of waiters described above.
// -----------------------------------------------------------------------------
WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
    : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {
}

WaitableEvent::~WaitableEvent() {
}

void WaitableEvent::Reset() {
  AutoLock locked(kernel_->lock_);
  kernel_->signaled_ = false;
}

void WaitableEvent::Signal() {
  AutoLock locked(kernel_->lock_);

  if (kernel_->signaled_)
    return;

  if (kernel_->manual_reset_) {
    SignalAll();
    kernel_->signaled_ = true;
  } else {
    // In the auto-reset case, if no waiter was woken we leave the event
    // signaled so that the next waiter to arrive consumes the signal.
    if (!SignalOne())
      kernel_->signaled_ = true;
  }
}

bool WaitableEvent::IsSignaled() {
  AutoLock locked(kernel_->lock_);

  const bool result = kernel_->signaled_;
  if (result && !kernel_->manual_reset_)
    kernel_->signaled_ = false;
  return result;
}

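// -----------------------------------------------------------------------------
// Illustration (a minimal sketch, not part of the upstream Chromium source):
// how the manual_reset flag chosen at construction plays out through Signal(),
// IsSignaled() and Reset() above. The function name is hypothetical.
// -----------------------------------------------------------------------------
#if 0  // illustration only
static void ExampleResetSemantics() {
  // Manual-reset: Signal() leaves the event signaled until Reset() is called,
  // so IsSignaled() keeps returning true.
  WaitableEvent manual(true /* manual_reset */, false /* initially_signaled */);
  manual.Signal();
  manual.IsSignaled();  // true
  manual.IsSignaled();  // still true
  manual.Reset();       // back to unsignaled

  // Auto-reset: consuming the signal, here via IsSignaled() (or by waking a
  // waiter), resets the event automatically.
  WaitableEvent automatic(false /* manual_reset */, false /* initially_signaled */);
  automatic.Signal();
  automatic.IsSignaled();  // true, and the event resets itself
  automatic.IsSignaled();  // false
}
#endif
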
// -----------------------------------------------------------------------------
// Synchronous waits

// -----------------------------------------------------------------------------
// This is a synchronous waiter. The thread is waiting on the given condition
// variable and the fired flag in this object.
// -----------------------------------------------------------------------------
class SyncWaiter : public WaitableEvent::Waiter {
 public:
  SyncWaiter(ConditionVariable* cv, Lock* lock)
      : fired_(false),
        cv_(cv),
        lock_(lock),
        signaling_event_(NULL) {
  }

  bool Fire(WaitableEvent *signaling_event) {
    lock_->Acquire();
    const bool previous_value = fired_;
    fired_ = true;
    if (!previous_value)
      signaling_event_ = signaling_event;
    lock_->Release();

    if (previous_value)
      return false;

    cv_->Broadcast();

    // SyncWaiters are allocated on the stack of the blocking thread.
    return true;
  }

  WaitableEvent* signaled_event() const {
    return signaling_event_;
  }

  // ---------------------------------------------------------------------------
  // These waiters are always stack allocated and don't delete themselves. Thus
  // there's no ABA problem, and the tag is simply the object pointer.
  // ---------------------------------------------------------------------------
  bool Compare(void* tag) {
    return this == tag;
  }

  // ---------------------------------------------------------------------------
  // Called with lock held.
  // ---------------------------------------------------------------------------
  bool fired() const {
    return fired_;
  }

  // ---------------------------------------------------------------------------
  // During a TimedWait, we need a way to make sure that an auto-reset
  // WaitableEvent doesn't think that this event has been signaled between
  // unlocking it and removing it from the wait-list. Called with lock held.
  // ---------------------------------------------------------------------------
  void Disable() {
    fired_ = true;
  }

 private:
  bool fired_;
  ConditionVariable *const cv_;
  Lock *const lock_;
  WaitableEvent* signaling_event_;  // The WaitableEvent which woke us
};

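// -----------------------------------------------------------------------------
// Illustration (a minimal sketch, not part of the upstream Chromium source):
// the intended Fire() contract when one SyncWaiter sits on several wait-lists,
// as in WaitMany() below. Only the first event to fire the waiter gets `true`;
// later events get `false`, which SignalOne() uses to keep them signaled. The
// function name is hypothetical.
// -----------------------------------------------------------------------------
#if 0  // illustration only
static void ExampleFireOnce(WaitableEvent* first, WaitableEvent* second) {
  Lock lock;
  ConditionVariable cv(&lock);
  SyncWaiter sw(&cv, &lock);

  sw.Fire(first);       // true: sw records `first` as the signaling event
  sw.Fire(second);      // false: sw already fired, `second` would stay signaled
  sw.signaled_event();  // == first
}
#endif
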
bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
  const TimeTicks end_time(TimeTicks::Now() + max_time);
  const bool finite_time = max_time.ToInternalValue() >= 0;

  kernel_->lock_.Acquire();
  if (kernel_->signaled_) {
    if (!kernel_->manual_reset_) {
      // In this case we were signaled when we had no waiters. Now that
      // someone has waited upon us, we can automatically reset.
      kernel_->signaled_ = false;
    }

    kernel_->lock_.Release();
    return true;
  }

  Lock lock;
  lock.Acquire();
  ConditionVariable cv(&lock);
  SyncWaiter sw(&cv, &lock);

  Enqueue(&sw);
  kernel_->lock_.Release();
  // We are violating locking order here by holding the SyncWaiter lock but not
  // the WaitableEvent lock. However, this is safe because we don't lock @lock_
  // again before unlocking it.

  for (;;) {
    const TimeTicks current_time(TimeTicks::Now());

    if (sw.fired() || (finite_time && current_time >= end_time)) {
      const bool return_value = sw.fired();

      // We can't acquire @lock_ before releasing @lock (because of locking
      // order). In between the two, a signal could be fired and @sw would
      // accept it, yet we would still return false, so the signal would be
      // lost on an auto-reset WaitableEvent. Thus we call Disable, which
      // makes sw::Fire return false.
      sw.Disable();
      lock.Release();

      kernel_->lock_.Acquire();
      kernel_->Dequeue(&sw, &sw);
      kernel_->lock_.Release();

      return return_value;
    }

    if (finite_time) {
      const TimeDelta max_wait(end_time - current_time);
      cv.TimedWait(max_wait);
    } else {
      cv.Wait();
    }
  }
}

bool WaitableEvent::Wait() {
  return TimedWait(TimeDelta::FromSeconds(-1));
}

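// -----------------------------------------------------------------------------
// Illustration (a minimal sketch, not part of the upstream Chromium source):
// TimedWait() returns true if the event was signaled before the deadline and
// false on timeout, while Wait() above passes a negative TimeDelta, which the
// finite_time check treats as "wait forever". The function name is
// hypothetical.
// -----------------------------------------------------------------------------
#if 0  // illustration only
static bool ExampleTimedWait(WaitableEvent* event) {
  // Block for at most two seconds.
  return event->TimedWait(TimeDelta::FromSeconds(2));
}
#endif
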
// -----------------------------------------------------------------------------


// -----------------------------------------------------------------------------
// Synchronous waiting on multiple objects.

static bool  // StrictWeakOrdering
cmp_fst_addr(const std::pair<WaitableEvent*, size_t> &a,
             const std::pair<WaitableEvent*, size_t> &b) {
  return a.first < b.first;
}

// static
size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
                               size_t count) {
  DCHECK(count) << "Cannot wait on no events";

  // We need to acquire the locks in a globally consistent order. Thus we sort
  // the array of waitables by address. We actually sort pairs so that we can
  // map back to the original index values later.
  std::vector<std::pair<WaitableEvent*, size_t> > waitables;
  waitables.reserve(count);
  for (size_t i = 0; i < count; ++i)
    waitables.push_back(std::make_pair(raw_waitables[i], i));

  DCHECK_EQ(count, waitables.size());

  sort(waitables.begin(), waitables.end(), cmp_fst_addr);

  // The set of waitables must be distinct. Since we have just sorted by
  // address, we can check this cheaply by comparing pairs of consecutive
  // elements.
  for (size_t i = 0; i < waitables.size() - 1; ++i) {
    DCHECK(waitables[i].first != waitables[i+1].first);
  }

  Lock lock;
  ConditionVariable cv(&lock);
  SyncWaiter sw(&cv, &lock);

  const size_t r = EnqueueMany(&waitables[0], count, &sw);
  if (r) {
    // One of the events is already signaled. The SyncWaiter has not been
    // enqueued anywhere. EnqueueMany returns the count of remaining waitables
    // when the signaled one was seen, so the index of the signaled event is
    // @count - @r.
    return waitables[count - r].second;
  }

  // At this point, we hold the locks on all the WaitableEvents and we have
  // enqueued our waiter in them all.
  lock.Acquire();
  // Release the WaitableEvent locks in the reverse order.
  for (size_t i = 0; i < count; ++i) {
    waitables[count - (1 + i)].first->kernel_->lock_.Release();
  }

  for (;;) {
    if (sw.fired())
      break;

    cv.Wait();
  }
  lock.Release();

  // The address of the WaitableEvent which fired is stored in the SyncWaiter.
  WaitableEvent *const signaled_event = sw.signaled_event();
  // This will store the index into raw_waitables of the event which fired.
  size_t signaled_index = 0;

  // Take the locks of each WaitableEvent in turn (except the signaled one) and
  // remove our SyncWaiter from the wait-list.
  for (size_t i = 0; i < count; ++i) {
    if (raw_waitables[i] != signaled_event) {
      raw_waitables[i]->kernel_->lock_.Acquire();
      // There's no possible ABA issue with the address of the SyncWaiter here
      // because it lives on the stack. Thus the tag value is just the pointer
      // value again.
      raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
      raw_waitables[i]->kernel_->lock_.Release();
    } else {
      signaled_index = i;
    }
  }

  return signaled_index;
}
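
// -----------------------------------------------------------------------------
// Illustration (a minimal sketch, not part of the upstream Chromium source):
// a typical WaitMany() call. It blocks until any one of the events is signaled
// and returns the index of that event in the array the caller passed in, not
// in the address-sorted copy used internally. The function name is
// hypothetical.
// -----------------------------------------------------------------------------
#if 0  // illustration only
static size_t ExampleWaitMany(WaitableEvent* a,
                              WaitableEvent* b,
                              WaitableEvent* c) {
  WaitableEvent* events[] = { a, b, c };
  // Returns 0, 1 or 2 depending on which event fired (or which one was
  // already signaled when WaitMany was called).
  return WaitableEvent::WaitMany(events, 3);
}
#endif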

// -----------------------------------------------------------------------------
// If return value == 0:
//   The locks of the WaitableEvents have been taken in order and the Waiter has
//   been enqueued in the wait-list of each. None of the WaitableEvents are
//   currently signaled.
// else:
//   None of the WaitableEvent locks are held. The Waiter has not been enqueued
//   in any of them and the return value is the index of the first WaitableEvent
//   which was signaled, from the end of the array.
// -----------------------------------------------------------------------------
// static
size_t WaitableEvent::EnqueueMany
    (std::pair<WaitableEvent*, size_t>* waitables,
     size_t count, Waiter* waiter) {
  if (!count)
    return 0;

  waitables[0].first->kernel_->lock_.Acquire();
  if (waitables[0].first->kernel_->signaled_) {
    if (!waitables[0].first->kernel_->manual_reset_)
      waitables[0].first->kernel_->signaled_ = false;
    waitables[0].first->kernel_->lock_.Release();
    return count;
  }

  const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
  if (r) {
    waitables[0].first->kernel_->lock_.Release();
  } else {
    waitables[0].first->Enqueue(waiter);
  }

  return r;
}
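
// -----------------------------------------------------------------------------
// Worked example (added illustration, not part of the upstream Chromium
// source) of the return-value arithmetic above. Suppose WaitMany() passes
// count == 4 and waitables[2] is the first event found already signaled. The
// recursion runs:
//
//   EnqueueMany(waitables + 0, 4)  ->  waitables[0] not signaled, recurse
//   EnqueueMany(waitables + 1, 3)  ->  waitables[1] not signaled, recurse
//   EnqueueMany(waitables + 2, 2)  ->  waitables[2] signaled, return 2
//
// The value 2 (the count of remaining waitables when the signaled one was
// seen) propagates up unchanged while the outer frames release their locks,
// and WaitMany() recovers the signaled slot as count - r == 4 - 2 == 2.
// -----------------------------------------------------------------------------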

// -----------------------------------------------------------------------------


// -----------------------------------------------------------------------------
// Private functions...

// -----------------------------------------------------------------------------
// Wake all waiting waiters. Called with lock held.
// -----------------------------------------------------------------------------
bool WaitableEvent::SignalAll() {
  bool signaled_at_least_one = false;

  for (std::list<Waiter*>::iterator
       i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) {
    if ((*i)->Fire(this))
      signaled_at_least_one = true;
  }

  kernel_->waiters_.clear();
  return signaled_at_least_one;
}

// ---------------------------------------------------------------------------
// Try to wake a single waiter. Return true if one was woken. Called with lock
// held.
// ---------------------------------------------------------------------------
bool WaitableEvent::SignalOne() {
  for (;;) {
    if (kernel_->waiters_.empty())
      return false;

    const bool r = (*kernel_->waiters_.begin())->Fire(this);
    kernel_->waiters_.pop_front();
    if (r)
      return true;
  }
}

// -----------------------------------------------------------------------------
// Add a waiter to the list of those waiting. Called with lock held.
// -----------------------------------------------------------------------------
void WaitableEvent::Enqueue(Waiter* waiter) {
  kernel_->waiters_.push_back(waiter);
}

// -----------------------------------------------------------------------------
// Remove a waiter from the list of those waiting. Return true if the waiter was
// actually removed. Called with lock held.
// -----------------------------------------------------------------------------
bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
  for (std::list<Waiter*>::iterator
       i = waiters_.begin(); i != waiters_.end(); ++i) {
    if (*i == waiter && (*i)->Compare(tag)) {
      waiters_.erase(i);
      return true;
    }
  }

  return false;
}

// -----------------------------------------------------------------------------

}  // namespace base
