Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkThreadPool_DEFINED
#define SkThreadPool_DEFINED

#include "SkCondVar.h"
#include "SkRunnable.h"
#include "SkTDArray.h"
#include "SkTInternalLList.h"
#include "SkThreadUtils.h"
#include "SkTypes.h"

#if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
# include <unistd.h>
#endif

// Returns the number of cores on this machine.
static inline int num_cores() {
#if defined(SK_BUILD_FOR_WIN32)
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    return sysinfo.dwNumberOfProcessors;
#elif defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
    return sysconf(_SC_NPROCESSORS_ONLN);
#else
    return 1;
#endif
}
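
// Illustrative note (not part of the upstream header): num_cores() backs the
// kThreadPerCore case of SkTThreadPool's constructor below. For example:
//
//     int n = num_cores();             // GetSystemInfo() on Windows,
//                                      // sysconf(_SC_NPROCESSORS_ONLN) on POSIX-style builds
//     SkTThreadPool<void> pool(n);     // same effect as passing kThreadPerCore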

template <typename T>
class SkTThreadPool {
public:
    /**
     * Create a threadpool with count threads, or one thread per core if kThreadPerCore.
     */
    static const int kThreadPerCore = -1;
    explicit SkTThreadPool(int count);
    ~SkTThreadPool();

    /**
     * Queues up an SkRunnable to run when a thread is available, or synchronously if count is 0.
     * Does not take ownership. NULL is a safe no-op. If T is not void, the runnable will be passed
     * a reference to a T on the thread's local stack.
     */
    void add(SkTRunnable<T>*);

    /**
     * Block until all added SkRunnables have completed. Once called, calling add() is undefined.
     */
    void wait();

private:
    struct LinkedRunnable {
        SkTRunnable<T>* fRunnable;  // Unowned.
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(LinkedRunnable);
    };

    enum State {
        kRunning_State,  // Normal case. We've been constructed and no one has called wait().
        kWaiting_State,  // wait() has been called, but there may still be work queued or in progress.
        kHalting_State,  // There's no work to do and no thread is busy. All threads can shut down.
    };

    SkTInternalLList<LinkedRunnable> fQueue;
    SkCondVar                        fReady;
    SkTDArray<SkThread*>             fThreads;
    State                            fState;
    int                              fBusyThreads;

    static void Loop(void*);  // Static because we pass in this.
};

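// A usage sketch (illustrative only; SkHypotheticalJob and run_jobs() are made-up
// names, but the constructor, add(), and wait() calls match the API declared above):
//
//     class SkHypotheticalJob : public SkRunnable {
//     public:
//         virtual void run() { /* ... do one unit of work ... */ }
//     };
//
//     void run_jobs() {
//         SkHypotheticalJob jobs[16];                              // caller owns the runnables
//         SkTThreadPool<void> pool(SkTThreadPool<void>::kThreadPerCore);
//         for (int i = 0; i < 16; i++) {
//             pool.add(&jobs[i]);     // add() does not take ownership
//         }
//         pool.wait();                // block until every job has finished
//     }
//
// Constructing the pool with count == 0 makes add() run each runnable synchronously
// on the calling thread, and add(NULL) is a safe no-op, per the comments above.
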

template <typename T>
SkTThreadPool<T>::SkTThreadPool(int count) : fState(kRunning_State), fBusyThreads(0) {
    if (count < 0) {
        count = num_cores();
    }
    // Create count threads, all running SkTThreadPool::Loop.
    for (int i = 0; i < count; i++) {
        SkThread* thread = SkNEW_ARGS(SkThread, (&SkTThreadPool::Loop, this));
        *fThreads.append() = thread;
        thread->start();
    }
}

template <typename T>
SkTThreadPool<T>::~SkTThreadPool() {
    if (kRunning_State == fState) {
        this->wait();
    }
}

namespace SkThreadPoolPrivate {

template <typename T>
struct ThreadLocal {
    void run(SkTRunnable<T>* r) { r->run(data); }
    T data;
};

template <>
struct ThreadLocal<void> {
    void run(SkTRunnable<void>* r) { r->run(); }
};

}  // namespace SkThreadPoolPrivate

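// A sketch of the typed (non-void) case, illustrative only: SkLocalScratch and
// SkCountingRunnable are hypothetical names. Each worker thread keeps a single
// ThreadLocal<T> on its stack, so every runnable it executes receives a reference
// to that same per-thread T through SkTRunnable<T>::run(T&):
//
//     struct SkLocalScratch {
//         SkLocalScratch() : fHits(0) {}
//         int fHits;                      // per-worker tally, never shared across threads
//     };
//
//     class SkCountingRunnable : public SkTRunnable<SkLocalScratch> {
//     public:
//         virtual void run(SkLocalScratch& scratch) {
//             scratch.fHits++;            // touches only the calling worker's scratch
//         }
//     };
//
//     SkTThreadPool<SkLocalScratch> pool(2);
//     SkCountingRunnable job;
//     pool.add(&job);                     // run() sees the worker's own SkLocalScratch
//     pool.wait();
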
template <typename T>
void SkTThreadPool<T>::add(SkTRunnable<T>* r) {
    if (r == NULL) {
        return;
    }

    if (fThreads.isEmpty()) {
        SkThreadPoolPrivate::ThreadLocal<T> threadLocal;
        threadLocal.run(r);
        return;
    }

    LinkedRunnable* linkedRunnable = SkNEW(LinkedRunnable);
    linkedRunnable->fRunnable = r;
    fReady.lock();
    SkASSERT(fState != kHalting_State);  // Shouldn't be able to add work when we're halting.
    fQueue.addToHead(linkedRunnable);
    fReady.signal();
    fReady.unlock();
}


template <typename T>
void SkTThreadPool<T>::wait() {
    fReady.lock();
    fState = kWaiting_State;
    fReady.broadcast();
    fReady.unlock();

    // Wait for all threads to stop.
    for (int i = 0; i < fThreads.count(); i++) {
        fThreads[i]->join();
        SkDELETE(fThreads[i]);
    }
    SkASSERT(fQueue.isEmpty());
}

template <typename T>
/*static*/ void SkTThreadPool<T>::Loop(void* arg) {
    // The SkTThreadPool passes itself as arg to each thread as they're created.
    SkTThreadPool<T>* pool = static_cast<SkTThreadPool<T>*>(arg);
    SkThreadPoolPrivate::ThreadLocal<T> threadLocal;

    while (true) {
        // We have to be holding the lock to read the queue and to call wait.
        pool->fReady.lock();
        while (pool->fQueue.isEmpty()) {
            // Does the client want to stop and are all the threads ready to stop?
            // If so, we move into the halting state, and whack all the threads so they notice.
            if (kWaiting_State == pool->fState && pool->fBusyThreads == 0) {
                pool->fState = kHalting_State;
                pool->fReady.broadcast();
            }
            // Any time we find ourselves in the halting state, it's quitting time.
            if (kHalting_State == pool->fState) {
                pool->fReady.unlock();
                return;
            }
            // wait yields the lock while waiting, but will have it again when awoken.
            pool->fReady.wait();
        }
        // We've got the lock back here, no matter if we ran wait or not.

        // The queue is not empty, so we have something to run. Claim it.
        LinkedRunnable* r = pool->fQueue.tail();

        pool->fQueue.remove(r);

        // Having claimed our SkRunnable, we now give up the lock while we run it.
        // Otherwise, we'd only ever do work on one thread at a time, which rather
        // defeats the point of this code.
        pool->fBusyThreads++;
        pool->fReady.unlock();

        // OK, now really do the work.
        threadLocal.run(r->fRunnable);
        SkDELETE(r);

        // Let everyone know we're not busy.
        pool->fReady.lock();
        pool->fBusyThreads--;
        pool->fReady.unlock();
    }

    SkASSERT(false);  // Unreachable. The only exit happens when pool->fState is kHalting_State.
}
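
// Shutdown handshake, as implemented above (a descriptive note, not upstream text):
// the pool starts in kRunning_State; wait() flips fState to kWaiting_State and
// broadcasts on fReady; each worker that finds the queue empty while
// fBusyThreads == 0 moves the pool to kHalting_State and broadcasts again, so
// every blocked worker wakes up and returns from Loop(); wait() then join()s and
// deletes each SkThread. Destroying a pool that is still in kRunning_State simply
// calls wait() from the destructor.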

typedef SkTThreadPool<void> SkThreadPool;

#endif