Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop.h"

#include <algorithm>

#include "mozilla/Atomics.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_pump_default.h"
#include "base/string_util.h"
#include "base/thread_local.h"

#if defined(OS_MACOSX)
#include "base/message_pump_mac.h"
#endif
#if defined(OS_POSIX)
#include "base/message_pump_libevent.h"
#endif
#if defined(OS_LINUX) || defined(OS_BSD)
#if defined(MOZ_WIDGET_GTK)
#include "base/message_pump_glib.h"
#endif
#ifdef MOZ_WIDGET_QT
#include "base/message_pump_qt.h"
#endif
#endif
#ifdef ANDROID
#include "base/message_pump_android.h"
#endif
#ifdef MOZ_TASK_TRACER
#include "GeckoTaskTracer.h"
#endif

#include "MessagePump.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
static base::LazyInstance<base::ThreadLocalPointer<MessageLoop> > lazy_tls_ptr(
    base::LINKER_INITIALIZED);

//------------------------------------------------------------------------------

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
static const int kTaskRunEvent = 0x1;
static const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
static const int kLeastNonZeroMessageId = 1;
static const int kMaxMessageId = 1099;
static const int kNumberOfDistinctMessagesDisplayed = 1100;

//------------------------------------------------------------------------------

#if defined(OS_WIN)

// Upon a SEH exception in this thread, it restores the original unhandled
// exception filter.
static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) {
  ::SetUnhandledExceptionFilter(old_filter);
  return EXCEPTION_CONTINUE_SEARCH;
}

// Retrieves a pointer to the current unhandled exception filter. There
// is no standalone getter method.
static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
  LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL;
  top_filter = ::SetUnhandledExceptionFilter(0);
  ::SetUnhandledExceptionFilter(top_filter);
  return top_filter;
}

#endif  // defined(OS_WIN)

//------------------------------------------------------------------------------

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  //DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

static mozilla::Atomic<int32_t> message_loop_id_seq(0);
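
// The constructor registers this loop in thread-local storage and selects a
// concrete message pump based on |type|: the Mozilla-specific types get their
// mozilla::ipc pump variants, while the generic types fall back to the
// platform default, IO, or UI pumps.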
MessageLoop::MessageLoop(Type type)
    : type_(type),
      id_(++message_loop_id_seq),
      nestable_tasks_allowed_(true),
      exception_restoration_(false),
      state_(NULL),
      run_depth_base_(1),
#ifdef OS_WIN
      os_modal_loop_(false),
#endif  // OS_WIN
      transient_hang_timeout_(0),
      permanent_hang_timeout_(0),
      next_sequence_num_(0) {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);
  if (type_ == TYPE_MOZILLA_UI) {
    pump_ = new mozilla::ipc::MessagePump();
    return;
  }
  if (type_ == TYPE_MOZILLA_CHILD) {
    pump_ = new mozilla::ipc::MessagePumpForChildProcess();
    // There is a MessageLoop Run call from XRE_InitChildProcess
    // and another one from MessagePumpForChildProcess. The one
    // from MessagePumpForChildProcess becomes the base, so we need
    // to set run_depth_base_ to 2 or we'll never be able to process
    // Idle tasks.
    run_depth_base_ = 2;
    return;
  }
  if (type_ == TYPE_MOZILLA_NONMAINTHREAD) {
    pump_ = new mozilla::ipc::MessagePumpForNonMainThreads();
    return;
  }

#if defined(OS_WIN)
  // TODO(rvargas): Get rid of the OS guards.
  if (type_ == TYPE_DEFAULT) {
    pump_ = new base::MessagePumpDefault();
  } else if (type_ == TYPE_IO) {
    pump_ = new base::MessagePumpForIO();
  } else {
    DCHECK(type_ == TYPE_UI);
    pump_ = new base::MessagePumpForUI();
  }
#elif defined(OS_POSIX)
  if (type_ == TYPE_UI) {
#if defined(OS_MACOSX)
    pump_ = base::MessagePumpMac::Create();
#elif defined(OS_LINUX) || defined(OS_BSD)
    pump_ = new base::MessagePumpForUI();
#endif  // OS_LINUX
  } else if (type_ == TYPE_IO) {
    pump_ = new base::MessagePumpLibevent();
  } else {
    pump_ = new base::MessagePumpDefault();
  }
#endif  // OS_POSIX
}
MessageLoop::~MessageLoop() {
  DCHECK(this == current());

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  DCHECK(!state_);

  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}

void MessageLoop::AddDestructionObserver(DestructionObserver *obs) {
  DCHECK(this == current());
  destruction_observers_.AddObserver(obs);
}

void MessageLoop::RemoveDestructionObserver(DestructionObserver *obs) {
  DCHECK(this == current());
  destruction_observers_.RemoveObserver(obs);
}
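
// Run() enters this loop's pump and dispatches tasks until Quit() is called;
// RunAllPending() processes only the work that is already queued and returns
// as soon as the loop would otherwise block.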
void MessageLoop::Run() {
  AutoRunState save_state(this);
  RunHandler();
}

void MessageLoop::RunAllPending() {
  AutoRunState save_state(this);
  state_->quit_received = true;  // Means run until we would otherwise block.
  RunHandler();
}

// Runs the loop in two different SEH modes:
// enable_SEH_restoration_ = false : any unhandled exception goes to the last
// one that calls SetUnhandledExceptionFilter().
// enable_SEH_restoration_ = true : any unhandled exception goes to the filter
// that existed before the loop was run.
void MessageLoop::RunHandler() {
#if defined(OS_WIN)
  if (exception_restoration_) {
    LPTOP_LEVEL_EXCEPTION_FILTER current_filter = GetTopSEHFilter();
    MOZ_SEH_TRY {
      RunInternal();
    } MOZ_SEH_EXCEPT(SEHFilter(current_filter)) {
    }
    return;
  }
#endif

  RunInternal();
}

//------------------------------------------------------------------------------

void MessageLoop::RunInternal() {
  DCHECK(this == current());
  pump_->Run(this);
}

//------------------------------------------------------------------------------
// Wrapper functions for use in above message loop framework.

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (state_->run_depth > run_depth_base_)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  Task* task = deferred_non_nestable_work_queue_.front().task;
  deferred_non_nestable_work_queue_.pop();

  RunTask(task);
  return true;
}

//------------------------------------------------------------------------------

void MessageLoop::Quit() {
  DCHECK(current() == this);
  if (state_) {
    state_->quit_received = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}
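
// The PostTask variants below enqueue |task| on the incoming queue for
// asynchronous execution on this loop's thread; the loop takes ownership of
// the task and deletes it after running it. The *Delayed* variants run the
// task no sooner than |delay_ms| milliseconds from now, and the *NonNestable*
// variants defer the task while a nested run of the loop is active.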
void MessageLoop::PostTask(
    const tracked_objects::Location& from_here, Task* task) {
  PostTask_Helper(from_here, task, 0, true);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here, Task* task, int delay_ms) {
  PostTask_Helper(from_here, task, delay_ms, true);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here, Task* task) {
  PostTask_Helper(from_here, task, 0, false);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here, Task* task, int delay_ms) {
  PostTask_Helper(from_here, task, delay_ms, false);
}
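
// Idle tasks bypass the incoming queue entirely: they must be posted from the
// loop's own thread and only run from DoIdleWork, i.e. when the loop has no
// other work pending.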
void MessageLoop::PostIdleTask(
    const tracked_objects::Location& from_here, Task* task) {
  DCHECK(current() == this);

#ifdef MOZ_TASK_TRACER
  task = mozilla::tasktracer::CreateTracedTask(task);
#endif

  task->SetBirthPlace(from_here);
  PendingTask pending_task(task, false);
  deferred_non_nestable_work_queue_.push(pending_task);
}

// Possibly called on a background thread!
void MessageLoop::PostTask_Helper(
    const tracked_objects::Location& from_here, Task* task, int delay_ms,
    bool nestable) {

#ifdef MOZ_TASK_TRACER
  task = mozilla::tasktracer::CreateTracedTask(task);
#endif

  task->SetBirthPlace(from_here);

  PendingTask pending_task(task, nestable);

  if (delay_ms > 0) {
    pending_task.delayed_run_time =
        TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms);
  } else {
    DCHECK(delay_ms == 0) << "delay should not be negative";
  }

  // Warning: Don't try to short-circuit, and handle this thread's tasks more
  // directly, as it could starve handling of foreign threads. Put every task
  // into this queue.

  scoped_refptr<base::MessagePump> pump;
  {
    AutoLock locked(incoming_queue_lock_);
    incoming_queue_.push(pending_task);
    pump = pump_;
  }
  // Since the incoming_queue_ may contain a task that destroys this message
  // loop, we cannot exit incoming_queue_lock_ until we are done with |this|.
  // We use a stack-based reference to the message pump so that we can call
  // ScheduleWork outside of incoming_queue_lock_.

  pump->ScheduleWork();
}
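
// Toggles whether queued tasks may run from within a nested run of this loop.
// Re-enabling nestable tasks pokes the native pump so that work deferred while
// the setting was off gets a chance to run.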
void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (nestable_tasks_allowed_ != allowed) {
    nestable_tasks_allowed_ = allowed;
    if (!nestable_tasks_allowed_)
      return;
    // Start the native pump if we are not already pumping.
    pump_->ScheduleWorkForNestedLoop();
  }
}

void MessageLoop::ScheduleWork() {
  // Start the native pump if we are not already pumping.
  pump_->ScheduleWork();
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

//------------------------------------------------------------------------------

void MessageLoop::RunTask(Task* task) {
  DCHECK(nestable_tasks_allowed_);
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  task->Run();
  delete task;

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || state_->run_depth <= run_depth_base_) {
    RunTask(pending_task.task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue. Initialize the sequence number
  // before inserting into the delayed_work_queue_. The sequence number
  // is used to facilitate FIFO sorting when two tasks have the same
  // delayed_run_time value.
  PendingTask new_pending_task(pending_task);
  new_pending_task.sequence_num = next_sequence_num_++;
  delayed_work_queue_.push(new_pending_task);
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from incoming_queue_ to
  // work_queue_ by waiting until the last minute (work_queue_ is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (!work_queue_.empty())
    return;  // Wait till we *really* need to lock and load.

  // Acquire all we can from the inter-thread queue with one lock acquisition.
  {
    AutoLock lock(incoming_queue_lock_);
    if (incoming_queue_.empty())
      return;
    std::swap(incoming_queue_, work_queue_);
    DCHECK(incoming_queue_.empty());
  }
}
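
// Destroys every task still sitting in the deferred and delayed queues and
// reports whether anything was actually deleted. Used by the destructor to
// drain tasks that keep spawning more work.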
bool MessageLoop::DeletePendingTasks() {
  MOZ_ASSERT(work_queue_.empty());
  bool did_work = !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    Task* task = deferred_non_nestable_work_queue_.front().task;
    deferred_non_nestable_work_queue_.pop();
    delete task;
  }
  did_work |= !delayed_work_queue_.empty();
  while (!delayed_work_queue_.empty()) {
    Task* task = delayed_work_queue_.top().task;
    delayed_work_queue_.pop();
    delete task;
  }
  return did_work;
}
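
// MessagePump::Delegate callback: run one immediate task if any is ready.
// Delayed tasks encountered here are moved onto the delayed work queue and
// scheduled with the pump instead of being run. Returns true if a task ran.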
bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to re-schedule.
        if (delayed_work_queue_.top().task == pending_task.task)
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}
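
// MessagePump::Delegate callback: run the next delayed task whose run time has
// arrived, and report the run time of the task after it through
// |next_delayed_work_time| so the pump can re-arm its timer.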
bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    *next_delayed_work_time = TimeTicks();
    return false;
  }

  if (delayed_work_queue_.top().delayed_run_time > TimeTicks::Now()) {
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
    return false;
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}
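
// MessagePump::Delegate callback: invoked when the pump is about to wait.
// Runs one deferred non-nestable task if possible; otherwise, if Quit() has
// been requested, tells the pump to stop.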
bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (state_->quit_received)
    pump_->Quit();

  return false;
}

//------------------------------------------------------------------------------
// MessageLoop::AutoRunState
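
// RAII helper: pushes a new run state (and nesting depth) onto the loop for
// the duration of a Run call and restores the previous state on destruction.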
MessageLoop::AutoRunState::AutoRunState(MessageLoop* loop) : loop_(loop) {
  // Make the loop reference us.
  previous_state_ = loop_->state_;
  if (previous_state_) {
    run_depth = previous_state_->run_depth + 1;
  } else {
    run_depth = 1;
  }
  loop_->state_ = this;

  // Initialize the other fields:
  quit_received = false;
#if defined(OS_WIN)
  dispatcher = NULL;
#endif
}

MessageLoop::AutoRunState::~AutoRunState() {
  loop_->state_ = previous_state_;
}

//------------------------------------------------------------------------------
// MessageLoop::PendingTask

bool MessageLoop::PendingTask::operator<(const PendingTask& other) const {
  // Since the top of a priority queue is defined as the "greatest" element, we
  // need to invert the comparison here. We want the smaller time to be at the
  // top of the heap.

  if (delayed_run_time < other.delayed_run_time)
    return false;

  if (delayed_run_time > other.delayed_run_time)
    return true;

  // If the times happen to match, then we use the sequence number to decide.
  // Compare the difference to support integer roll-over.
  return (sequence_num - other.sequence_num) > 0;
}

//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_WIN)

void MessageLoopForUI::Run(Dispatcher* dispatcher) {
  AutoRunState save_state(this);
  state_->dispatcher = dispatcher;
  RunHandler();
}

void MessageLoopForUI::AddObserver(Observer* observer) {
  pump_win()->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  pump_win()->RemoveObserver(observer);
}

void MessageLoopForUI::WillProcessMessage(const MSG& message) {
  pump_win()->WillProcessMessage(message);
}

void MessageLoopForUI::DidProcessMessage(const MSG& message) {
  pump_win()->DidProcessMessage(message);
}

void MessageLoopForUI::PumpOutPendingPaintMessages() {
  pump_ui()->PumpOutPendingPaintMessages();
}

#endif  // defined(OS_WIN)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if defined(OS_WIN)

void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  pump_io()->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return pump_io()->WaitForIOCompletion(timeout, filter);
}

#elif defined(OS_POSIX)
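
// On POSIX platforms, file descriptor and signal watching is forwarded to the
// libevent-based pump; |mode| is translated to the pump's own Mode enum.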
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher *controller,
                                           Watcher *delegate) {
  return pump_libevent()->WatchFileDescriptor(
      fd,
      persistent,
      static_cast<base::MessagePumpLibevent::Mode>(mode),
      controller,
      delegate);
}

bool
MessageLoopForIO::CatchSignal(int sig,
                              SignalEvent* sigevent,
                              SignalWatcher* delegate)
{
  return pump_libevent()->CatchSignal(sig, sigevent, delegate);
}

#endif