Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
michael@0 | 4 | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | #include <map> |
michael@0 | 7 | #include <memory> |
michael@0 | 8 | |
michael@0 | 9 | #include <dlfcn.h> |
michael@0 | 10 | #include <errno.h> |
michael@0 | 11 | #include <fcntl.h> |
michael@0 | 12 | #include <setjmp.h> |
michael@0 | 13 | #include <signal.h> |
michael@0 | 14 | #include <poll.h> |
michael@0 | 15 | #include <pthread.h> |
michael@0 | 16 | #include <alloca.h> |
michael@0 | 17 | #include <sys/epoll.h> |
michael@0 | 18 | #include <sys/mman.h> |
michael@0 | 19 | #include <sys/prctl.h> |
michael@0 | 20 | #include <sys/types.h> |
michael@0 | 21 | #include <sys/socket.h> |
michael@0 | 22 | #include <sys/stat.h> |
michael@0 | 23 | #include <sys/syscall.h> |
michael@0 | 24 | #include <vector> |
michael@0 | 25 | |
michael@0 | 26 | #include "mozilla/LinkedList.h" |
michael@0 | 27 | #include "Nuwa.h" |
michael@0 | 28 | |
michael@0 | 29 | using namespace mozilla; |
michael@0 | 30 | |
michael@0 | 31 | extern "C" MFBT_API int tgkill(pid_t tgid, pid_t tid, int signalno) { |
michael@0 | 32 | return syscall(__NR_tgkill, tgid, tid, signalno); |
michael@0 | 33 | } |
michael@0 | 34 | |
michael@0 | 35 | /** |
michael@0 | 36 | * Provides the wrappers to a selected set of pthread and system-level functions |
michael@0 | 37 | * as the basis for implementing Zygote-like preforking mechanism. |
michael@0 | 38 | */ |
michael@0 | 39 | |
/**
 * Real functions for the wrappers.
 *
 * These follow the GNU linker's --wrap=<symbol> convention: when a symbol is
 * wrapped, calls to it are redirected to __wrap_<symbol> (defined later in
 * this file) while the original implementation remains reachable under the
 * __real_<symbol> name declared here.
 */
extern "C" {
int __real_pthread_create(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine) (void *),
                          void *arg);
int __real_pthread_key_create(pthread_key_t *key, void (*destructor)(void*));
int __real_pthread_key_delete(pthread_key_t key);
pthread_t __real_pthread_self();
int __real_pthread_join(pthread_t thread, void **retval);
int __real_epoll_wait(int epfd,
                      struct epoll_event *events,
                      int maxevents,
                      int timeout);
int __real_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mtx);
int __real_pthread_cond_timedwait(pthread_cond_t *cond,
                                  pthread_mutex_t *mtx,
                                  const struct timespec *abstime);
// Bionic-internal variant of pthread_cond_timedwait taking an explicit
// clock -- note the double underscore in the wrapped symbol name.
int __real___pthread_cond_timedwait(pthread_cond_t *cond,
                                    pthread_mutex_t *mtx,
                                    const struct timespec *abstime,
                                    clockid_t clock);
int __real_pthread_mutex_lock(pthread_mutex_t *mtx);
int __real_poll(struct pollfd *fds, nfds_t nfds, int timeout);
int __real_epoll_create(int size);
int __real_socketpair(int domain, int type, int protocol, int sv[2]);
int __real_pipe2(int __pipedes[2], int flags);
int __real_pipe(int __pipedes[2]);
int __real_epoll_ctl(int aEpollFd, int aOp, int aFd, struct epoll_event *aEvent);
int __real_close(int aFd);
}
michael@0 | 73 | |
// Shorthand for calling the un-wrapped original of a function, e.g.
// REAL(pthread_self)() expands to __real_pthread_self().
#define REAL(s) __real_##s

/**
 * A Nuwa process is started by preparing. After preparing, it waits
 * for all threads becoming frozen. Then, it is ready while all
 * threads are frozen.
 */
static bool sIsNuwaProcess = false; // This process is a Nuwa process.
static bool sIsFreezing = false; // Waiting for all threads getting frozen.
static bool sNuwaReady = false; // Nuwa process is ready.
static bool sNuwaPendingSpawn = false; // Are there any pending spawn requests?
// True while a fork of a content process is in flight (see the abort in
// thread_info_cleanup()).
static bool sNuwaForking = false;

// Fds of transports of top level protocols.
static NuwaProtoFdInfo sProtoFdInfos[NUWA_TOPLEVEL_MAX];
static int sProtoFdInfosSize = 0;
michael@0 | 90 | |
/**
 * STL-compatible allocator whose storage comes straight from libc's
 * malloc()/free(), resolved with dlopen()/dlsym() at construction time.
 * Going directly to libc bypasses the default allocation path (see the
 * TLSInfoList comment below for the deadlock this avoids).
 */
template <typename T>
struct LibcAllocator: public std::allocator<T>
{
  LibcAllocator()
  {
    // The handle is intentionally never dlclose()'d: libc stays mapped for
    // the lifetime of the process anyway.
    void* handle = dlopen("libc.so", RTLD_LAZY);
    mLibcMalloc = reinterpret_cast<void*(*)(size_t)>(dlsym(handle, "malloc"));
    mLibcFree = reinterpret_cast<void(*)(void*)>(dlsym(handle, "free"));

    if (!mLibcMalloc || !mLibcFree) {
      // libc should be available, or we'll deadlock in using TLSInfoList.
      abort();
    }
  }

  inline typename std::allocator<T>::pointer
  allocate(typename std::allocator<T>::size_type n,
           const void * = 0)
  {
    return reinterpret_cast<T *>(mLibcMalloc(n * sizeof(T)));
  }

  inline void
  deallocate(typename std::allocator<T>::pointer p,
             typename std::allocator<T>::size_type n)
  {
    mLibcFree(p);
  }

  template<typename U>
  struct rebind
  {
    typedef LibcAllocator<U> other;
  };
private:
  void* (*mLibcMalloc)(size_t);
  void (*mLibcFree)(void*);
};
michael@0 | 129 | |
/**
 * TLSInfoList should use malloc() and free() in libc to avoid the deadlock that
 * jemalloc calls into __wrap_pthread_mutex_lock() and then deadlocks while
 * the same thread already acquired sThreadCountLock.
 */
// A list of (TLS key, saved value) pairs for one thread; filled by
// SaveTLSInfo() and consumed by RestoreTLSInfo().
typedef std::vector<std::pair<pthread_key_t, void *>,
                    LibcAllocator<std::pair<pthread_key_t, void *> > >
TLSInfoList;
michael@0 | 138 | |
/**
 * Return the system's page size in bytes.
 */
static size_t getPageSize(void) {
#ifdef HAVE_GETPAGESIZE
  return getpagesize();
#elif defined(_SC_PAGESIZE)
  return sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
  return PAGE_SIZE;
#else
#warning "Hard-coding page size to 4096 bytes"
  // Fixed: this return statement was missing its terminating semicolon,
  // which broke the build on any platform that reaches this fallback.
  return 4096;
#endif
}

/**
 * Align the pointer to the next page boundary unless it's already aligned.
 *
 * @param aPtr an address (as uintptr_t, so malloc()'d pointers can be passed)
 * @return the smallest multiple of the page size that is >= aPtr
 */
static uintptr_t ceilToPage(uintptr_t aPtr) {
  size_t pageSize = getPageSize();

  return ((aPtr + pageSize - 1) / pageSize) * pageSize;
}
michael@0 | 163 | |
/**
 * The stack size is chosen carefully so the frozen threads doesn't consume too
 * much memory in the Nuwa process. The threads shouldn't run deep recursive
 * methods or do large allocations on the stack to avoid stack overflow.
 */
#ifndef NUWA_STACK_SIZE
#define NUWA_STACK_SIZE (1024 * 128)
#endif

// Size of the buffer holding a thread's native name. NOTE(review): 16 matches
// the kernel's prctl(PR_SET_NAME) limit (incl. NUL) -- confirm against the
// code that fills nativeThreadName (not in this chunk).
#define NATIVE_THREAD_NAME_LENGTH 16
michael@0 | 174 | |
/**
 * Per-thread bookkeeping record. One exists for each thread created through
 * __wrap_pthread_create() in the Nuwa process; records are linked into
 * sAllThreads.
 */
struct thread_info : public mozilla::LinkedListElement<thread_info> {
  pthread_t origThreadID;      // pthread ID in the Nuwa process.
  pthread_t recreatedThreadID; // pthread ID assigned on recreation (set in RestoreTLSInfo()).
  pthread_attr_t threadAttr;   // Attributes (incl. stack addr/size) used to recreate the thread.
  jmp_buf jmpEnv;              // Saved context; used by freeze/recreate code outside this chunk.
  jmp_buf retEnv;              // Context longjmp()'d to from thread_create_startup() in a spawned process.

  int flags;                   // Bitmask of the TINFO_FLAG_* values defined below.

  void *(*startupFunc)(void *arg); // The caller's original start routine...
  void *startupArg;                // ...and its argument.

  // The thread specific function to recreate the new thread. It's executed
  // after the thread is recreated.
  void (*recrFunc)(void *arg);
  void *recrArg;

  TLSInfoList tlsInfo;         // TLS snapshot captured by SaveTLSInfo().

  // Mutex to re-acquire during recreation -- presumably set when the thread
  // froze while holding it; the code that sets it is outside this chunk.
  pthread_mutex_t *reacquireMutex;
  void *stk;                   // Base of the malloc()'d stack (includes the guard page).

  pid_t origNativeThreadID;      // Kernel tid (gettid()) in the Nuwa process.
  pid_t recreatedNativeThreadID; // Kernel tid after recreation.
  char nativeThreadName[NATIVE_THREAD_NAME_LENGTH]; // Saved native thread name.
};

// C-style alias used throughout this file.
typedef struct thread_info thread_info_t;

// The thread currently being recreated in a spawned process; read by
// RunCustomRecreation().
static thread_info_t *sCurrentRecreatingThread = nullptr;
michael@0 | 205 | |
michael@0 | 206 | /** |
michael@0 | 207 | * This function runs the custom recreation function registered when calling |
michael@0 | 208 | * NuwaMarkCurrentThread() after thread stack is restored. |
michael@0 | 209 | */ |
michael@0 | 210 | static void |
michael@0 | 211 | RunCustomRecreation() { |
michael@0 | 212 | thread_info_t *tinfo = sCurrentRecreatingThread; |
michael@0 | 213 | if (tinfo->recrFunc != nullptr) { |
michael@0 | 214 | tinfo->recrFunc(tinfo->recrArg); |
michael@0 | 215 | } |
michael@0 | 216 | } |
michael@0 | 217 | |
/**
 * Every thread should be marked as either TINFO_FLAG_NUWA_SUPPORT or
 * TINFO_FLAG_NUWA_SKIP, or it means a potential error. We force
 * Gecko code to mark every single thread to make sure there are no accidents
 * when recreating threads with Nuwa.
 *
 * Threads marked as TINFO_FLAG_NUWA_SUPPORT can be checkpointed explicitly, by
 * calling NuwaCheckpointCurrentThread(), or implicitly when they call into wrapped
 * functions like pthread_mutex_lock(), epoll_wait(), etc.
 * TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT denotes the explicitly checkpointed thread.
 */
#define TINFO_FLAG_NUWA_SUPPORT 0x1
#define TINFO_FLAG_NUWA_SKIP 0x2
#define TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT 0x4

// A constructor hook: a callback plus its opaque argument.
typedef struct nuwa_construct {
  void (*construct)(void *);
  void *arg;
} nuwa_construct_t;

// Hooks run when making/finalizing the Nuwa process; the registration and
// invocation code is outside this chunk.
static std::vector<nuwa_construct_t> sConstructors;
static std::vector<nuwa_construct_t> sFinalConstructors;

// Every live TLS key and its destructor, maintained by
// __wrap_pthread_key_create()/__wrap_pthread_key_delete(); guarded by
// sTLSKeyLock (declared below).
typedef std::map<pthread_key_t, void (*)(void *)> TLSKeySet;
static TLSKeySet sTLSKeys;
michael@0 | 243 | |
/**
 * This mutex is used to block the running threads and freeze their contexts.
 * PrepareNuwaProcess() is the first one to acquire the lock. Further attempts
 * to acquire this mutex (in the freeze point macros) will block and freeze the
 * calling thread.
 */
static pthread_mutex_t sThreadFreezeLock = PTHREAD_MUTEX_INITIALIZER;

// Record for the main thread, which is not created through
// __wrap_pthread_create().
static thread_info_t sMainThread;
// Every tracked thread; guarded by sThreadCountLock.
static LinkedList<thread_info_t> sAllThreads;
static int sThreadCount = 0;       // Number of threads in sAllThreads.
static int sThreadFreezeCount = 0; // Number of frozen threads (updated by code outside this chunk).
/**
 * This mutex protects the access to thread info:
 * sAllThreads, sThreadCount, sThreadFreezeCount, sRecreateVIPCount.
 */
static pthread_mutex_t sThreadCountLock = PTHREAD_MUTEX_INITIALIZER;
/**
 * This condition variable lets MakeNuwaProcess() wait until all recreated
 * threads are frozen.
 */
static pthread_cond_t sThreadChangeCond = PTHREAD_COND_INITIALIZER;

/**
 * This mutex and condition variable is used to serialize the fork requests
 * from the parent process.
 */
static pthread_mutex_t sForkLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sForkWaitCond = PTHREAD_COND_INITIALIZER;

/**
 * sForkWaitCondChanged will be reset to false on the IPC thread before
 * and will be changed to true on the main thread to indicate that the condition
 * that the IPC thread is waiting for has already changed.
 */
static bool sForkWaitCondChanged = false;

/**
 * This mutex protects the access to sTLSKeys, which keeps track of existing
 * TLS Keys.
 */
static pthread_mutex_t sTLSKeyLock = PTHREAD_MUTEX_INITIALIZER;
// Count of threads marked TINFO_FLAG_NUWA_SKIP; maintained by code outside
// this chunk.
static int sThreadSkipCount = 0;
michael@0 | 287 | |
michael@0 | 288 | static thread_info_t * |
michael@0 | 289 | GetThreadInfoInner(pthread_t threadID) { |
michael@0 | 290 | for (thread_info_t *tinfo = sAllThreads.getFirst(); |
michael@0 | 291 | tinfo; |
michael@0 | 292 | tinfo = tinfo->getNext()) { |
michael@0 | 293 | if (pthread_equal(tinfo->origThreadID, threadID)) { |
michael@0 | 294 | return tinfo; |
michael@0 | 295 | } |
michael@0 | 296 | } |
michael@0 | 297 | |
michael@0 | 298 | return nullptr; |
michael@0 | 299 | } |
michael@0 | 300 | |
michael@0 | 301 | /** |
michael@0 | 302 | * Get thread info using the specified thread ID. |
michael@0 | 303 | * |
michael@0 | 304 | * @return thread_info_t which has threadID == specified threadID |
michael@0 | 305 | */ |
michael@0 | 306 | static thread_info_t * |
michael@0 | 307 | GetThreadInfo(pthread_t threadID) { |
michael@0 | 308 | if (sIsNuwaProcess) { |
michael@0 | 309 | REAL(pthread_mutex_lock)(&sThreadCountLock); |
michael@0 | 310 | } |
michael@0 | 311 | thread_info_t *tinfo = GetThreadInfoInner(threadID); |
michael@0 | 312 | if (sIsNuwaProcess) { |
michael@0 | 313 | pthread_mutex_unlock(&sThreadCountLock); |
michael@0 | 314 | } |
michael@0 | 315 | return tinfo; |
michael@0 | 316 | } |
michael@0 | 317 | |
michael@0 | 318 | /** |
michael@0 | 319 | * Get thread info using the specified native thread ID. |
michael@0 | 320 | * |
michael@0 | 321 | * @return thread_info_t with nativeThreadID == specified threadID |
michael@0 | 322 | */ |
michael@0 | 323 | static thread_info_t* |
michael@0 | 324 | GetThreadInfo(pid_t threadID) { |
michael@0 | 325 | if (sIsNuwaProcess) { |
michael@0 | 326 | REAL(pthread_mutex_lock)(&sThreadCountLock); |
michael@0 | 327 | } |
michael@0 | 328 | thread_info_t *thrinfo = nullptr; |
michael@0 | 329 | for (thread_info_t *tinfo = sAllThreads.getFirst(); |
michael@0 | 330 | tinfo; |
michael@0 | 331 | tinfo = tinfo->getNext()) { |
michael@0 | 332 | if (tinfo->origNativeThreadID == threadID) { |
michael@0 | 333 | thrinfo = tinfo; |
michael@0 | 334 | break; |
michael@0 | 335 | } |
michael@0 | 336 | } |
michael@0 | 337 | if (sIsNuwaProcess) { |
michael@0 | 338 | pthread_mutex_unlock(&sThreadCountLock); |
michael@0 | 339 | } |
michael@0 | 340 | |
michael@0 | 341 | return thrinfo; |
michael@0 | 342 | } |
michael@0 | 343 | |
#if !defined(HAVE_THREAD_TLS_KEYWORD)
/**
 * Get thread info of the current thread.
 *
 * @return thread_info_t for the current thread, or nullptr if the current
 *         thread is not tracked (the loop falls through with tinfo == nullptr).
 */
static thread_info_t *
GetCurThreadInfo() {
  pthread_t threadID = REAL(pthread_self)();
  // Tracked threads are keyed by origThreadID in the Nuwa process and by
  // recreatedThreadID in a spawned process; pick the member to compare via a
  // pointer-to-member.
  pthread_t thread_info_t::*threadIDptr =
    (sIsNuwaProcess ?
     &thread_info_t::origThreadID :
     &thread_info_t::recreatedThreadID);

  REAL(pthread_mutex_lock)(&sThreadCountLock);
  thread_info_t *tinfo;
  for (tinfo = sAllThreads.getFirst();
       tinfo;
       tinfo = tinfo->getNext()) {
    if (pthread_equal(tinfo->*threadIDptr, threadID)) {
      break;
    }
  }
  pthread_mutex_unlock(&sThreadCountLock);
  return tinfo;
}
#define CUR_THREAD_INFO GetCurThreadInfo()
#define SET_THREAD_INFO(x) /* Nothing to do. */
#else
// Is not nullptr only for threads created by pthread_create() in an Nuwa process.
// It is always nullptr for the main thread.
static __thread thread_info_t *sCurThreadInfo = nullptr;
#define CUR_THREAD_INFO sCurThreadInfo
#define SET_THREAD_INFO(x) do { sCurThreadInfo = (x); } while(0)
#endif // HAVE_THREAD_TLS_KEYWORD
michael@0 | 379 | |
/*
 * Track all epoll fds and handling events.
 *
 * Epoll instances expose no way to enumerate their registrations, so this
 * bookkeeping mirrors every epoll fd and the events registered on it.
 * Inconsistencies between the mirror and the callers are treated as fatal
 * (abort()).
 */
class EpollManager {
public:
  // Mirror of one epoll instance: fd -> registered events.
  class EpollInfo {
  public:
    typedef struct epoll_event Events;
    typedef std::map<int, Events> EpollEventsMap;
    typedef EpollEventsMap::iterator iterator;
    typedef EpollEventsMap::const_iterator const_iterator;

    EpollInfo(): mBackSize(0) {}
    EpollInfo(int aBackSize): mBackSize(aBackSize) {}
    EpollInfo(const EpollInfo &aOther)
      : mEvents(aOther.mEvents)
      , mBackSize(aOther.mBackSize) {
    }
    ~EpollInfo() {
      mEvents.clear();
    }

    // Start tracking aFd with aEvents. Aborts if aFd is already tracked.
    void AddEvents(int aFd, Events &aEvents) {
      bool inserted = mEvents.insert(std::make_pair(aFd, aEvents)).second;
      if (!inserted) {
        abort();
      }
    }

    // Stop tracking aFd. Aborts if aFd is not tracked.
    void RemoveEvents(int aFd) {
      if (mEvents.erase(aFd) == 0) {
        abort();
      }
    }

    // Replace the events of an already-tracked aFd. Aborts otherwise.
    void ModifyEvents(int aFd, Events &aEvents) {
      iterator entry = mEvents.find(aFd);
      if (entry == mEvents.end()) {
        abort();
      }
      entry->second = aEvents;
    }

    // Look up the events of aFd. Aborts if aFd is not tracked.
    const Events &FindEvents(int aFd) const {
      const_iterator entry = mEvents.find(aFd);
      if (entry == mEvents.end()) {
        abort();
      }
      return entry->second;
    }

    int Size() const { return mEvents.size(); }

    // Iterator with values of <fd, Events> pairs.
    const_iterator begin() const { return mEvents.begin(); }
    const_iterator end() const { return mEvents.end(); }

    // The size hint the epoll instance was created with.
    int BackSize() const { return mBackSize; }

  private:
    EpollEventsMap mEvents;
    int mBackSize;

    friend class EpollManager;
  };

  typedef std::map<int, EpollInfo> EpollInfoMap;
  typedef EpollInfoMap::iterator iterator;
  typedef EpollInfoMap::const_iterator const_iterator;

public:
  // Start tracking a new epoll fd. Aborts if it is already tracked.
  void AddEpollInfo(int aEpollFd, int aBackSize) {
    if (FindEpollInfo(aEpollFd) != nullptr) {
      abort();
    }
    mEpollFdsInfo[aEpollFd] = EpollInfo(aBackSize);
  }

  // Return the record of aEpollFd, or nullptr when it is unknown.
  EpollInfo *FindEpollInfo(int aEpollFd) {
    iterator entry = mEpollFdsInfo.find(aEpollFd);
    return entry == mEpollFdsInfo.end() ? nullptr : &entry->second;
  }

  // Stop tracking aEpollFd. Aborts if it is not tracked.
  void RemoveEpollInfo(int aEpollFd) {
    if (mEpollFdsInfo.erase(aEpollFd) == 0) {
      abort();
    }
  }

  int Size() const { return mEpollFdsInfo.size(); }

  // Iterator of <epollfd, EpollInfo> pairs.
  const_iterator begin() const { return mEpollFdsInfo.begin(); }
  const_iterator end() const { return mEpollFdsInfo.end(); }

  // Lazily-created singleton accessor. Not thread-safe by itself --
  // presumably only reached under external serialization; confirm at call
  // sites.
  static EpollManager *Singleton() {
    if (!sInstance) {
      sInstance = new EpollManager();
    }
    return sInstance;
  }

  // Destroy the singleton. Aborts when called before Singleton().
  static void Shutdown() {
    if (!sInstance) {
      abort();
    }

    delete sInstance;
    sInstance = nullptr;
  }

private:
  static EpollManager *sInstance;
  ~EpollManager() {
    mEpollFdsInfo.clear();
  }

  EpollInfoMap mEpollFdsInfo;

  EpollManager() {}
};

EpollManager* EpollManager::sInstance;
michael@0 | 507 | |
/**
 * Allocate a thread_info_t for a thread about to be created, give it its own
 * malloc()'d stack with a read-only guard page, and link it into sAllThreads
 * under sThreadCountLock.
 *
 * @return the new, partially-initialized record; origThreadID and friends are
 *         filled in later by the thread itself (_thread_create_startup()).
 */
static thread_info_t *
thread_info_new(void) {
  /* link tinfo to sAllThreads */
  thread_info_t *tinfo = new thread_info_t();
  tinfo->flags = 0;
  tinfo->recrFunc = nullptr;
  tinfo->recrArg = nullptr;
  tinfo->recreatedThreadID = 0;
  tinfo->recreatedNativeThreadID = 0;
  tinfo->reacquireMutex = nullptr;
  // One extra page so that a whole page can be protected below even when
  // malloc() returns a block that is not page-aligned. NOTE(review): the
  // malloc() result is not checked for nullptr before use.
  tinfo->stk = malloc(NUWA_STACK_SIZE + getPageSize());

  // We use a smaller stack size. Add protection to stack overflow: mprotect()
  // stack top (the page at the lowest address) so we crash instead of corrupt
  // other content that is malloc()'d.
  // NOTE(review): the guarded page lies inside [stk, stk + NUWA_STACK_SIZE)
  // when stk is not page-aligned, slightly shrinking the usable stack; the
  // mprotect() return value is also ignored.
  uintptr_t pageGuard = ceilToPage((uintptr_t)tinfo->stk);
  mprotect((void*)pageGuard, getPageSize(), PROT_READ);

  pthread_attr_init(&tinfo->threadAttr);

  REAL(pthread_mutex_lock)(&sThreadCountLock);
  // Insert to the tail.
  sAllThreads.insertBack(tinfo);

  sThreadCount++;
  // Wake waiters (see sThreadChangeCond) watching the thread count.
  pthread_cond_signal(&sThreadChangeCond);
  pthread_mutex_unlock(&sThreadCountLock);

  return tinfo;
}
michael@0 | 538 | |
michael@0 | 539 | static void |
michael@0 | 540 | thread_info_cleanup(void *arg) { |
michael@0 | 541 | if (sNuwaForking) { |
michael@0 | 542 | // We shouldn't have any thread exiting when we are forking a new process. |
michael@0 | 543 | abort(); |
michael@0 | 544 | } |
michael@0 | 545 | |
michael@0 | 546 | thread_info_t *tinfo = (thread_info_t *)arg; |
michael@0 | 547 | pthread_attr_destroy(&tinfo->threadAttr); |
michael@0 | 548 | |
michael@0 | 549 | REAL(pthread_mutex_lock)(&sThreadCountLock); |
michael@0 | 550 | /* unlink tinfo from sAllThreads */ |
michael@0 | 551 | tinfo->remove(); |
michael@0 | 552 | |
michael@0 | 553 | sThreadCount--; |
michael@0 | 554 | pthread_cond_signal(&sThreadChangeCond); |
michael@0 | 555 | pthread_mutex_unlock(&sThreadCountLock); |
michael@0 | 556 | |
michael@0 | 557 | free(tinfo->stk); |
michael@0 | 558 | delete tinfo; |
michael@0 | 559 | } |
michael@0 | 560 | |
/**
 * Common startup body for threads created through __wrap_pthread_create() in
 * the Nuwa process: record the thread's identity in tinfo, install the
 * cleanup handler, then run the user's start routine.
 */
static void *
_thread_create_startup(void *arg) {
  thread_info_t *tinfo = (thread_info_t *)arg;
  void *r;

  // Save thread info; especially, stackaddr & stacksize.
  // Reuse the stack in the new thread.
  pthread_getattr_np(REAL(pthread_self)(), &tinfo->threadAttr);

  SET_THREAD_INFO(tinfo);
  tinfo->origThreadID = REAL(pthread_self)();
  tinfo->origNativeThreadID = gettid();

  pthread_cleanup_push(thread_info_cleanup, tinfo);

  r = tinfo->startupFunc(tinfo->startupArg);

  if (!sIsNuwaProcess) {
    // NOTE(review): returning between pthread_cleanup_push() and
    // pthread_cleanup_pop() is undefined behavior per POSIX; this relies on
    // the platform's macro-based implementation tolerating it, and it
    // deliberately skips thread_info_cleanup() in a spawned process.
    return r;
  }

  pthread_cleanup_pop(1);

  return r;
}
michael@0 | 586 | |
// reserve STACK_RESERVED_SZ * 4 bytes for thread_recreate_startup().
#define STACK_RESERVED_SZ 64
// The first word of the reserved area doubles as the stack sentinel.
#define STACK_SENTINEL(v) ((v)[0])
// Sentinel pattern derived from the buffer's own address, so a stale or
// clobbered value is unlikely to match by accident.
#define STACK_SENTINEL_VALUE(v) ((uint32_t)(v) ^ 0xdeadbeef)

static void *
thread_create_startup(void *arg) {
  /*
   * Dark Art!! Never try to do the same unless you are ABSOLUTELY sure of
   * what you are doing!
   *
   * This function is here for reserving stack space before calling
   * _thread_create_startup(). see also thread_create_startup();
   */
  void *r;
  // volatile keeps the compiler from optimizing the reservation away.
  volatile uint32_t reserved[STACK_RESERVED_SZ];

  // Reserve stack space.
  STACK_SENTINEL(reserved) = STACK_SENTINEL_VALUE(reserved);

  r = _thread_create_startup(arg);

  // Check if the reservation is enough.
  if (STACK_SENTINEL(reserved) != STACK_SENTINEL_VALUE(reserved)) {
    abort(); // Did not reserve enough stack space.
  }

  thread_info_t *tinfo = CUR_THREAD_INFO;
  if (!sIsNuwaProcess) {
    // In a spawned process, transfer control back through the retEnv context
    // (saved by code outside this chunk) instead of returning normally.
    longjmp(tinfo->retEnv, 1);

    // Never go here!
    abort();
  }

  return r;
}
michael@0 | 624 | |
/**
 * Wrapper for pthread_create().
 *
 * Outside the Nuwa process this is a pure pass-through. Inside it, the new
 * thread gets a thread_info_t record and a dedicated guarded stack
 * (thread_info_new()), and is started through thread_create_startup() rather
 * than start_routine directly, so it can later be frozen and recreated.
 */
extern "C" MFBT_API int
__wrap_pthread_create(pthread_t *thread,
                      const pthread_attr_t *attr,
                      void *(*start_routine) (void *),
                      void *arg) {
  if (!sIsNuwaProcess) {
    return REAL(pthread_create)(thread, attr, start_routine, arg);
  }

  thread_info_t *tinfo = thread_info_new();
  tinfo->startupFunc = start_routine;
  tinfo->startupArg = arg;
  // The caller-supplied attr is deliberately ignored here: the recreatable
  // stack allocated by thread_info_new() must be used instead.
  pthread_attr_setstack(&tinfo->threadAttr, tinfo->stk, NUWA_STACK_SIZE);

  int rv = REAL(pthread_create)(thread,
                                &tinfo->threadAttr,
                                thread_create_startup,
                                tinfo);
  if (rv) {
    // Creation failed: reclaim the record ourselves, since the thread's
    // cleanup handler will never run.
    thread_info_cleanup(tinfo);
  } else {
    tinfo->origThreadID = *thread;
  }

  return rv;
}
michael@0 | 651 | |
michael@0 | 652 | // TLS related |
michael@0 | 653 | |
michael@0 | 654 | /** |
michael@0 | 655 | * Iterates over the existing TLS keys and store the TLS data for the current |
michael@0 | 656 | * thread in tinfo. |
michael@0 | 657 | */ |
michael@0 | 658 | static void |
michael@0 | 659 | SaveTLSInfo(thread_info_t *tinfo) { |
michael@0 | 660 | REAL(pthread_mutex_lock)(&sTLSKeyLock); |
michael@0 | 661 | tinfo->tlsInfo.clear(); |
michael@0 | 662 | for (TLSKeySet::const_iterator it = sTLSKeys.begin(); |
michael@0 | 663 | it != sTLSKeys.end(); |
michael@0 | 664 | it++) { |
michael@0 | 665 | void *value = pthread_getspecific(it->first); |
michael@0 | 666 | if (value == nullptr) { |
michael@0 | 667 | continue; |
michael@0 | 668 | } |
michael@0 | 669 | |
michael@0 | 670 | pthread_key_t key = it->first; |
michael@0 | 671 | tinfo->tlsInfo.push_back(TLSInfoList::value_type(key, value)); |
michael@0 | 672 | } |
michael@0 | 673 | pthread_mutex_unlock(&sTLSKeyLock); |
michael@0 | 674 | } |
michael@0 | 675 | |
michael@0 | 676 | /** |
michael@0 | 677 | * Restores the TLS data for the current thread from tinfo. |
michael@0 | 678 | */ |
michael@0 | 679 | static void |
michael@0 | 680 | RestoreTLSInfo(thread_info_t *tinfo) { |
michael@0 | 681 | for (TLSInfoList::const_iterator it = tinfo->tlsInfo.begin(); |
michael@0 | 682 | it != tinfo->tlsInfo.end(); |
michael@0 | 683 | it++) { |
michael@0 | 684 | pthread_key_t key = it->first; |
michael@0 | 685 | const void *value = it->second; |
michael@0 | 686 | if (pthread_setspecific(key, value)) { |
michael@0 | 687 | abort(); |
michael@0 | 688 | } |
michael@0 | 689 | } |
michael@0 | 690 | |
michael@0 | 691 | SET_THREAD_INFO(tinfo); |
michael@0 | 692 | tinfo->recreatedThreadID = REAL(pthread_self)(); |
michael@0 | 693 | tinfo->recreatedNativeThreadID = gettid(); |
michael@0 | 694 | } |
michael@0 | 695 | |
michael@0 | 696 | extern "C" MFBT_API int |
michael@0 | 697 | __wrap_pthread_key_create(pthread_key_t *key, void (*destructor)(void*)) { |
michael@0 | 698 | int rv = REAL(pthread_key_create)(key, destructor); |
michael@0 | 699 | if (rv != 0) { |
michael@0 | 700 | return rv; |
michael@0 | 701 | } |
michael@0 | 702 | REAL(pthread_mutex_lock)(&sTLSKeyLock); |
michael@0 | 703 | sTLSKeys.insert(TLSKeySet::value_type(*key, destructor)); |
michael@0 | 704 | pthread_mutex_unlock(&sTLSKeyLock); |
michael@0 | 705 | return 0; |
michael@0 | 706 | } |
michael@0 | 707 | |
michael@0 | 708 | extern "C" MFBT_API int |
michael@0 | 709 | __wrap_pthread_key_delete(pthread_key_t key) { |
michael@0 | 710 | if (!sIsNuwaProcess) { |
michael@0 | 711 | return REAL(pthread_key_delete)(key); |
michael@0 | 712 | } |
michael@0 | 713 | int rv = REAL(pthread_key_delete)(key); |
michael@0 | 714 | if (rv != 0) { |
michael@0 | 715 | return rv; |
michael@0 | 716 | } |
michael@0 | 717 | REAL(pthread_mutex_lock)(&sTLSKeyLock); |
michael@0 | 718 | sTLSKeys.erase(key); |
michael@0 | 719 | pthread_mutex_unlock(&sTLSKeyLock); |
michael@0 | 720 | return 0; |
michael@0 | 721 | } |
michael@0 | 722 | |
michael@0 | 723 | extern "C" MFBT_API pthread_t |
michael@0 | 724 | __wrap_pthread_self() { |
michael@0 | 725 | thread_info_t *tinfo = CUR_THREAD_INFO; |
michael@0 | 726 | if (tinfo) { |
michael@0 | 727 | // For recreated thread, masquerade as the original thread in the Nuwa |
michael@0 | 728 | // process. |
michael@0 | 729 | return tinfo->origThreadID; |
michael@0 | 730 | } |
michael@0 | 731 | return REAL(pthread_self)(); |
michael@0 | 732 | } |
michael@0 | 733 | |
michael@0 | 734 | extern "C" MFBT_API int |
michael@0 | 735 | __wrap_pthread_join(pthread_t thread, void **retval) { |
michael@0 | 736 | thread_info_t *tinfo = GetThreadInfo(thread); |
michael@0 | 737 | if (tinfo == nullptr) { |
michael@0 | 738 | return REAL(pthread_join)(thread, retval); |
michael@0 | 739 | } |
michael@0 | 740 | // pthread_join() need to use the real thread ID in the spawned process. |
michael@0 | 741 | return REAL(pthread_join)(tinfo->recreatedThreadID, retval); |
michael@0 | 742 | } |
michael@0 | 743 | |
/**
 * The following are used to synchronize between the main thread and the
 * thread being recreated. The main thread will wait until the thread is woken
 * up from the freeze points or the blocking intercepted functions and then
 * proceed to recreate the next frozen thread.
 *
 * In thread recreation, the main thread recreates the frozen threads one by
 * one. The recreated threads will be "gated" until the main thread "opens the
 * gate" to let them run freely as if they were created from scratch. The VIP
 * threads get the chance to run first after their thread stacks are recreated
 * (using longjmp()) so they can adjust their contexts to a valid, consistent
 * state. The threads frozen waiting for pthread condition variables are VIP
 * threads. After woken up they need to run first to make the associated mutex
 * in a valid state to maintain the semantics of the intercepted function calls
 * (like pthread_cond_wait()).
 */

// Used to synchronize the main thread and the thread being recreated so that
// only one thread is allowed to be recreated at a time.
static pthread_mutex_t sRecreateWaitLock = PTHREAD_MUTEX_INITIALIZER;
// Used to block recreated threads until the main thread "opens the gate".
static pthread_mutex_t sRecreateGateLock = PTHREAD_MUTEX_INITIALIZER;
// Used to block the main thread from "opening the gate" until all VIP threads
// have been recreated.
static pthread_mutex_t sRecreateVIPGateLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sRecreateVIPCond = PTHREAD_COND_INITIALIZER;
// Number of VIP threads that froze (bumped in THREAD_FREEZE_POINT1_VIP()).
static int sRecreateVIPCount = 0;
// Number of recreated threads that have passed their gate so far;
// RECREATE_WAIT_ALL_VIP() waits until this reaches sRecreateVIPCount.
static int sRecreateGatePassed = 0;
michael@0 | 772 | |
/**
 * Thread recreation macros.
 *
 * The following macros are used in the forked process to synchronize and
 * control the progress of thread recreation.
 *
 * 1. RECREATE_START() is first called in the beginning of thread
 *    recreation to set sRecreateWaitLock and sRecreateGateLock in locked
 *    state.
 * 2. For each frozen thread:
 *    2.1. RECREATE_BEFORE() to set the thread being recreated.
 *    2.2. thread_recreate() to recreate the frozen thread.
 *    2.3. Main thread calls RECREATE_WAIT() to wait on sRecreateWaitLock until
 *         the thread is recreated from the freeze point and calls
 *         RECREATE_CONTINUE() to release sRecreateWaitLock.
 *    2.4. Non-VIP threads are blocked on RECREATE_GATE(). VIP threads call
 *         RECREATE_PASS_VIP() to mark that a VIP thread is successfully
 *         recreated and then are blocked by calling RECREATE_GATE_VIP().
 * 3. RECREATE_WAIT_ALL_VIP() to wait until all VIP threads passed, that is,
 *    VIP threads already have their contexts (mainly pthread mutex) in a valid
 *    state.
 * 4. RECREATE_OPEN_GATE() to unblock threads blocked by sRecreateGateLock.
 * 5. RECREATE_FINISH() to complete thread recreation.
 */
#define RECREATE_START()                          \
  do {                                            \
    REAL(pthread_mutex_lock)(&sRecreateWaitLock); \
    REAL(pthread_mutex_lock)(&sRecreateGateLock); \
  } while(0)
#define RECREATE_BEFORE(info) do { sCurrentRecreatingThread = info; } while(0)
#define RECREATE_WAIT() REAL(pthread_mutex_lock)(&sRecreateWaitLock)
#define RECREATE_CONTINUE() do { \
  RunCustomRecreation(); \
  pthread_mutex_unlock(&sRecreateWaitLock); \
} while(0)
#define RECREATE_FINISH() pthread_mutex_unlock(&sRecreateWaitLock)
#define RECREATE_GATE()                           \
  do {                                            \
    REAL(pthread_mutex_lock)(&sRecreateGateLock); \
    sRecreateGatePassed++;                        \
    pthread_mutex_unlock(&sRecreateGateLock);     \
  } while(0)
#define RECREATE_OPEN_GATE() pthread_mutex_unlock(&sRecreateGateLock)
#define RECREATE_GATE_VIP()                       \
  do {                                            \
    REAL(pthread_mutex_lock)(&sRecreateGateLock); \
    pthread_mutex_unlock(&sRecreateGateLock);     \
  } while(0)
#define RECREATE_PASS_VIP()                          \
  do {                                               \
    REAL(pthread_mutex_lock)(&sRecreateVIPGateLock); \
    sRecreateGatePassed++;                           \
    pthread_cond_signal(&sRecreateVIPCond);          \
    pthread_mutex_unlock(&sRecreateVIPGateLock);     \
  } while(0)
#define RECREATE_WAIT_ALL_VIP()                      \
  do {                                               \
    REAL(pthread_mutex_lock)(&sRecreateVIPGateLock); \
    while(sRecreateGatePassed < sRecreateVIPCount) { \
      REAL(pthread_cond_wait)(&sRecreateVIPCond,     \
                              &sRecreateVIPGateLock);\
    }                                                \
    pthread_mutex_unlock(&sRecreateVIPGateLock);     \
  } while(0)
michael@0 | 837 | |
/**
 * Thread freeze points. Note that the freeze points are implemented as macros
 * so as not to garble the content of the stack after setjmp().
 *
 * In the nuwa process, when a thread supporting nuwa calls a wrapper
 * function, freeze point 1 setjmp()s to save the state. We only allow the
 * thread to be frozen in the wrapper functions. If thread freezing is not
 * enabled yet, the wrapper functions act like their wrapped counterparts,
 * except for the extra actions in the freeze points. If thread freezing is
 * enabled, the thread will be frozen by calling one of the wrapper functions.
 * The threads can be frozen in any of the following points:
 *
 * 1) Freeze point 1: this is the point where we setjmp() in the nuwa process
 *    and longjmp() in the spawned process. If freezing is enabled, then the
 *    current thread blocks by acquiring an already locked mutex,
 *    sThreadFreezeLock.
 * 2) The wrapped function: the function that might block waiting for some
 *    resource or condition.
 * 3) Freeze point 2: blocks the current thread by acquiring sThreadFreezeLock.
 *    If freezing is not enabled then revert the counter change in freeze
 *    point 1.
 *
 * Note: freezePoint2 is declared volatile because its value must survive the
 * longjmp() back into this frame when the thread is recreated; the setjmp()
 * else-branch below is the entry point of a recreated thread.
 */
#define THREAD_FREEZE_POINT1()                                 \
  bool freezeCountChg = false;                                 \
  bool recreated = false;                                      \
  volatile bool freezePoint2 = false;                          \
  thread_info_t *tinfo;                                        \
  if (sIsNuwaProcess &&                                        \
      (tinfo = CUR_THREAD_INFO) &&                             \
      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT) &&              \
      !(tinfo->flags & TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT)) { \
    if (!setjmp(tinfo->jmpEnv)) {                              \
      REAL(pthread_mutex_lock)(&sThreadCountLock);             \
      SaveTLSInfo(tinfo);                                      \
      sThreadFreezeCount++;                                    \
      freezeCountChg = true;                                   \
      pthread_cond_signal(&sThreadChangeCond);                 \
      pthread_mutex_unlock(&sThreadCountLock);                 \
                                                               \
      if (sIsFreezing) {                                       \
        REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
        /* Never return from the pthread_mutex_lock() call. */ \
        abort();                                               \
      }                                                        \
    } else {                                                   \
      RECREATE_CONTINUE();                                     \
      RECREATE_GATE();                                         \
      freezeCountChg = false;                                  \
      recreated = true;                                        \
    }                                                          \
  }
michael@0 | 889 | |
/*
 * VIP variant of freeze point 1. In addition to THREAD_FREEZE_POINT1() it
 * bumps sRecreateVIPCount and records in freezePoint1 whether the thread
 * actually froze here, so the cond-wait wrappers can tell whether the mutex
 * was still held at freeze time. Unlike the non-VIP variant, a recreated
 * thread does not pass the gate here; the wrapper itself runs the
 * RECREATE_CONTINUE()/RECREATE_PASS_VIP()/RECREATE_GATE_VIP() steps.
 */
#define THREAD_FREEZE_POINT1_VIP()                             \
  bool freezeCountChg = false;                                 \
  bool recreated = false;                                      \
  volatile bool freezePoint1 = false;                          \
  volatile bool freezePoint2 = false;                          \
  thread_info_t *tinfo;                                        \
  if (sIsNuwaProcess &&                                        \
      (tinfo = CUR_THREAD_INFO) &&                             \
      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT) &&              \
      !(tinfo->flags & TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT)) { \
    if (!setjmp(tinfo->jmpEnv)) {                              \
      REAL(pthread_mutex_lock)(&sThreadCountLock);             \
      SaveTLSInfo(tinfo);                                      \
      sThreadFreezeCount++;                                    \
      sRecreateVIPCount++;                                     \
      freezeCountChg = true;                                   \
      pthread_cond_signal(&sThreadChangeCond);                 \
      pthread_mutex_unlock(&sThreadCountLock);                 \
                                                               \
      if (sIsFreezing) {                                       \
        freezePoint1 = true;                                   \
        REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
        /* Never return from the pthread_mutex_lock() call. */ \
        abort();                                               \
      }                                                        \
    } else {                                                   \
      freezeCountChg = false;                                  \
      recreated = true;                                        \
    }                                                          \
  }
michael@0 | 920 | |
/*
 * Freeze point 2: reached after the wrapped call returns. If the process has
 * become ready for forking in the meantime (sNuwaReady), the thread freezes
 * here for good; otherwise the freeze count bumped at freeze point 1 is
 * reverted.
 */
#define THREAD_FREEZE_POINT2()                               \
  if (freezeCountChg) {                                      \
    REAL(pthread_mutex_lock)(&sThreadCountLock);             \
    if (sNuwaReady && sIsNuwaProcess) {                      \
      pthread_mutex_unlock(&sThreadCountLock);               \
      freezePoint2 = true;                                   \
      REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
      /* Never return from the pthread_mutex_lock() call. */ \
      abort();                                               \
    }                                                        \
    sThreadFreezeCount--;                                    \
    pthread_cond_signal(&sThreadChangeCond);                 \
    pthread_mutex_unlock(&sThreadCountLock);                 \
  }
michael@0 | 935 | |
/*
 * VIP variant of freeze point 2: same as THREAD_FREEZE_POINT2() but also
 * reverts the sRecreateVIPCount bump made at THREAD_FREEZE_POINT1_VIP()
 * when the thread ends up not freezing.
 */
#define THREAD_FREEZE_POINT2_VIP()                           \
  if (freezeCountChg) {                                      \
    REAL(pthread_mutex_lock)(&sThreadCountLock);             \
    if (sNuwaReady && sIsNuwaProcess) {                      \
      pthread_mutex_unlock(&sThreadCountLock);               \
      freezePoint2 = true;                                   \
      REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
      /* Never return from the pthread_mutex_lock() call. */ \
      abort();                                               \
    }                                                        \
    sThreadFreezeCount--;                                    \
    sRecreateVIPCount--;                                     \
    pthread_cond_signal(&sThreadChangeCond);                 \
    pthread_mutex_unlock(&sThreadCountLock);                 \
  }
michael@0 | 951 | |
michael@0 | 952 | /** |
michael@0 | 953 | * Wrapping the blocking functions: epoll_wait(), poll(), pthread_mutex_lock(), |
michael@0 | 954 | * pthread_cond_wait() and pthread_cond_timedwait(): |
michael@0 | 955 | * |
michael@0 | 956 | * These functions are wrapped by the above freeze point macros. Once a new |
michael@0 | 957 | * process is forked, the recreated thread will be blocked in one of the wrapper |
michael@0 | 958 | * functions. When recreating the thread, we longjmp() to |
michael@0 | 959 | * THREAD_FREEZE_POINT1() to recover the thread stack. Care must be taken to |
michael@0 | 960 | * maintain the semantics of the wrapped function: |
michael@0 | 961 | * |
michael@0 | 962 | * - epoll_wait() and poll(): just retry the function. |
michael@0 | 963 | * - pthread_mutex_lock(): don't lock if frozen at freeze point 2 (lock is |
michael@0 | 964 | * already acquired). |
 * - pthread_cond_wait() and pthread_cond_timedwait(): if the thread was frozen
 *   while waiting on the condition variable, the mutex had already been
 *   released, so we need to reacquire the mutex before calling the wrapped
 *   function again to leave the mutex in a valid state.
michael@0 | 969 | */ |
michael@0 | 970 | |
/**
 * Freeze-point wrapper for epoll_wait(). On recreation the call is simply
 * retried; epoll_wait() carries no state that needs fixing up.
 */
extern "C" MFBT_API int
__wrap_epoll_wait(int epfd,
                  struct epoll_event *events,
                  int maxevents,
                  int timeout) {
  int rv;

  THREAD_FREEZE_POINT1();
  rv = REAL(epoll_wait)(epfd, events, maxevents, timeout);
  THREAD_FREEZE_POINT2();

  return rv;
}
michael@0 | 984 | |
/**
 * VIP freeze-point wrapper for pthread_cond_wait(). A thread frozen here is
 * a VIP thread: after recreation it must run before the gate opens so the
 * mutex associated with the condition variable ends up in a valid state.
 */
extern "C" MFBT_API int
__wrap_pthread_cond_wait(pthread_cond_t *cond,
                         pthread_mutex_t *mtx) {
  int rv = 0;

  THREAD_FREEZE_POINT1_VIP();
  if (freezePoint2) {
    // Frozen at freeze point 2: the wait had already completed in the Nuwa
    // process, so just take part in the recreation protocol and return.
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
    RECREATE_GATE_VIP();
    return rv;
  }
  if (recreated && mtx) {
    if (!freezePoint1 && pthread_mutex_trylock(mtx)) {
      // The thread was frozen in pthread_cond_wait() after releasing mtx in the
      // Nuwa process. In recreating this thread, we failed to reacquire mtx
      // with the pthread_mutex_trylock() call, that is, mtx was acquired by
      // another thread. Because of this, we need the main thread's help to
      // reacquire mtx so that it will be in a valid state.
      tinfo->reacquireMutex = mtx;
    }
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
  }
  rv = REAL(pthread_cond_wait)(cond, mtx);
  if (recreated && mtx) {
    // We still need to be gated as not to acquire another mutex associated with
    // another VIP thread and interfere with it.
    RECREATE_GATE_VIP();
  }
  THREAD_FREEZE_POINT2_VIP();

  return rv;
}
michael@0 | 1019 | |
/**
 * VIP freeze-point wrapper for pthread_cond_timedwait(); same recreation
 * protocol as __wrap_pthread_cond_wait().
 */
extern "C" MFBT_API int
__wrap_pthread_cond_timedwait(pthread_cond_t *cond,
                              pthread_mutex_t *mtx,
                              const struct timespec *abstime) {
  int rv = 0;

  THREAD_FREEZE_POINT1_VIP();
  if (freezePoint2) {
    // The wait had already completed in the Nuwa process; just take part in
    // the recreation protocol and return.
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
    RECREATE_GATE_VIP();
    return rv;
  }
  if (recreated && mtx) {
    if (!freezePoint1 && pthread_mutex_trylock(mtx)) {
      // mtx is held by another thread; ask the main thread to reacquire it
      // on our behalf (see __wrap_pthread_cond_wait()).
      tinfo->reacquireMutex = mtx;
    }
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
  }
  rv = REAL(pthread_cond_timedwait)(cond, mtx, abstime);
  if (recreated && mtx) {
    // Stay gated so we don't grab a mutex belonging to another VIP thread.
    RECREATE_GATE_VIP();
  }
  THREAD_FREEZE_POINT2_VIP();

  return rv;
}
michael@0 | 1048 | |
// Variant of pthread_cond_timedwait() taking an explicit clock, declared
// manually here — presumably a non-public symbol of the platform C library
// (no public header exposes it); confirm against the target libc.
extern "C" int __pthread_cond_timedwait(pthread_cond_t *cond,
                                        pthread_mutex_t *mtx,
                                        const struct timespec *abstime,
                                        clockid_t clock);
michael@0 | 1053 | |
/**
 * VIP freeze-point wrapper for the clock-taking __pthread_cond_timedwait();
 * same recreation protocol as __wrap_pthread_cond_wait().
 */
extern "C" MFBT_API int
__wrap___pthread_cond_timedwait(pthread_cond_t *cond,
                                pthread_mutex_t *mtx,
                                const struct timespec *abstime,
                                clockid_t clock) {
  int rv = 0;

  THREAD_FREEZE_POINT1_VIP();
  if (freezePoint2) {
    // The wait had already completed in the Nuwa process; just take part in
    // the recreation protocol and return.
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
    RECREATE_GATE_VIP();
    return rv;
  }
  if (recreated && mtx) {
    if (!freezePoint1 && pthread_mutex_trylock(mtx)) {
      // mtx is held by another thread; ask the main thread to reacquire it
      // on our behalf (see __wrap_pthread_cond_wait()).
      tinfo->reacquireMutex = mtx;
    }
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
  }
  rv = REAL(__pthread_cond_timedwait)(cond, mtx, abstime, clock);
  if (recreated && mtx) {
    // Stay gated so we don't grab a mutex belonging to another VIP thread.
    RECREATE_GATE_VIP();
  }
  THREAD_FREEZE_POINT2_VIP();

  return rv;
}
michael@0 | 1083 | |
/**
 * Freeze-point wrapper for pthread_mutex_lock(). If the thread froze at
 * freeze point 2 the lock was already acquired in the Nuwa process, so the
 * recreated thread must not lock it again.
 */
extern "C" MFBT_API int
__wrap_pthread_mutex_lock(pthread_mutex_t *mtx) {
  int rv = 0;

  THREAD_FREEZE_POINT1();
  if (freezePoint2) {
    // Lock already held from before the freeze; return success directly.
    return rv;
  }
  rv = REAL(pthread_mutex_lock)(mtx);
  THREAD_FREEZE_POINT2();

  return rv;
}
michael@0 | 1097 | |
/**
 * Freeze-point wrapper for poll(). On recreation the call is simply retried.
 */
extern "C" MFBT_API int
__wrap_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
  int rv;

  THREAD_FREEZE_POINT1();
  rv = REAL(poll)(fds, nfds, timeout);
  THREAD_FREEZE_POINT2();

  return rv;
}
michael@0 | 1108 | |
michael@0 | 1109 | extern "C" MFBT_API int |
michael@0 | 1110 | __wrap_epoll_create(int size) { |
michael@0 | 1111 | int epollfd = REAL(epoll_create)(size); |
michael@0 | 1112 | |
michael@0 | 1113 | if (!sIsNuwaProcess) { |
michael@0 | 1114 | return epollfd; |
michael@0 | 1115 | } |
michael@0 | 1116 | |
michael@0 | 1117 | if (epollfd >= 0) { |
michael@0 | 1118 | EpollManager::Singleton()->AddEpollInfo(epollfd, size); |
michael@0 | 1119 | } |
michael@0 | 1120 | |
michael@0 | 1121 | return epollfd; |
michael@0 | 1122 | } |
michael@0 | 1123 | |
/**
 * Wrapping the functions to create file descriptor pairs. In the child process
 * FD pairs are created for intra-process signaling. The generation of FD pairs
 * needs to be tracked in the nuwa process so they can be recreated in the
 * spawned process.
 */
struct FdPairInfo {
  // Which wrapped call produced this pair.
  enum {
    kPipe,
    kSocketpair
  } call;

  // Descriptor values as created in the Nuwa process.
  int FDs[2];
  int flags;      // pipe2() flags (kPipe entries).
  int domain;     // socketpair() arguments (kSocketpair entries).
  int type;
  int protocol;
};
michael@0 | 1142 | |
/**
 * Protects the access to sSignalFds.
 */
static pthread_mutex_t sSignalFdLock = PTHREAD_MUTEX_INITIALIZER;
// FD pairs created in the Nuwa process, replayed by ReplaceSignalFds().
static std::vector<FdPairInfo> sSignalFds;
michael@0 | 1148 | |
michael@0 | 1149 | extern "C" MFBT_API int |
michael@0 | 1150 | __wrap_socketpair(int domain, int type, int protocol, int sv[2]) |
michael@0 | 1151 | { |
michael@0 | 1152 | int rv = REAL(socketpair)(domain, type, protocol, sv); |
michael@0 | 1153 | |
michael@0 | 1154 | if (!sIsNuwaProcess || rv < 0) { |
michael@0 | 1155 | return rv; |
michael@0 | 1156 | } |
michael@0 | 1157 | |
michael@0 | 1158 | REAL(pthread_mutex_lock)(&sSignalFdLock); |
michael@0 | 1159 | FdPairInfo signalFd; |
michael@0 | 1160 | signalFd.call = FdPairInfo::kSocketpair; |
michael@0 | 1161 | signalFd.FDs[0] = sv[0]; |
michael@0 | 1162 | signalFd.FDs[1] = sv[1]; |
michael@0 | 1163 | signalFd.domain = domain; |
michael@0 | 1164 | signalFd.type = type; |
michael@0 | 1165 | signalFd.protocol = protocol; |
michael@0 | 1166 | |
michael@0 | 1167 | sSignalFds.push_back(signalFd); |
michael@0 | 1168 | pthread_mutex_unlock(&sSignalFdLock); |
michael@0 | 1169 | |
michael@0 | 1170 | return rv; |
michael@0 | 1171 | } |
michael@0 | 1172 | |
michael@0 | 1173 | extern "C" MFBT_API int |
michael@0 | 1174 | __wrap_pipe2(int __pipedes[2], int flags) |
michael@0 | 1175 | { |
michael@0 | 1176 | int rv = REAL(pipe2)(__pipedes, flags); |
michael@0 | 1177 | if (!sIsNuwaProcess || rv < 0) { |
michael@0 | 1178 | return rv; |
michael@0 | 1179 | } |
michael@0 | 1180 | |
michael@0 | 1181 | REAL(pthread_mutex_lock)(&sSignalFdLock); |
michael@0 | 1182 | FdPairInfo signalFd; |
michael@0 | 1183 | signalFd.call = FdPairInfo::kPipe; |
michael@0 | 1184 | signalFd.FDs[0] = __pipedes[0]; |
michael@0 | 1185 | signalFd.FDs[1] = __pipedes[1]; |
michael@0 | 1186 | signalFd.flags = flags; |
michael@0 | 1187 | sSignalFds.push_back(signalFd); |
michael@0 | 1188 | pthread_mutex_unlock(&sSignalFdLock); |
michael@0 | 1189 | return rv; |
michael@0 | 1190 | } |
michael@0 | 1191 | |
/**
 * pipe() is pipe2() with no flags; route through the tracked wrapper so the
 * pair is recorded as well.
 */
extern "C" MFBT_API int
__wrap_pipe(int __pipedes[2])
{
  return __wrap_pipe2(__pipedes, 0);
}
michael@0 | 1197 | |
michael@0 | 1198 | static void |
michael@0 | 1199 | DupeSingleFd(int newFd, int origFd) |
michael@0 | 1200 | { |
michael@0 | 1201 | struct stat sb; |
michael@0 | 1202 | if (fstat(origFd, &sb)) { |
michael@0 | 1203 | // Maybe the original FD is closed. |
michael@0 | 1204 | return; |
michael@0 | 1205 | } |
michael@0 | 1206 | int fd = fcntl(origFd, F_GETFD); |
michael@0 | 1207 | int fl = fcntl(origFd, F_GETFL); |
michael@0 | 1208 | dup2(newFd, origFd); |
michael@0 | 1209 | fcntl(origFd, F_SETFD, fd); |
michael@0 | 1210 | fcntl(origFd, F_SETFL, fl); |
michael@0 | 1211 | REAL(close)(newFd); |
michael@0 | 1212 | } |
michael@0 | 1213 | |
michael@0 | 1214 | extern "C" MFBT_API void |
michael@0 | 1215 | ReplaceSignalFds() |
michael@0 | 1216 | { |
michael@0 | 1217 | for (std::vector<FdPairInfo>::iterator it = sSignalFds.begin(); |
michael@0 | 1218 | it < sSignalFds.end(); ++it) { |
michael@0 | 1219 | int fds[2]; |
michael@0 | 1220 | int rc = 0; |
michael@0 | 1221 | switch (it->call) { |
michael@0 | 1222 | case FdPairInfo::kPipe: |
michael@0 | 1223 | rc = REAL(pipe2)(fds, it->flags); |
michael@0 | 1224 | break; |
michael@0 | 1225 | case FdPairInfo::kSocketpair: |
michael@0 | 1226 | rc = REAL(socketpair)(it->domain, it->type, it->protocol, fds); |
michael@0 | 1227 | break; |
michael@0 | 1228 | default: |
michael@0 | 1229 | continue; |
michael@0 | 1230 | } |
michael@0 | 1231 | |
michael@0 | 1232 | if (rc == 0) { |
michael@0 | 1233 | DupeSingleFd(fds[0], it->FDs[0]); |
michael@0 | 1234 | DupeSingleFd(fds[1], it->FDs[1]); |
michael@0 | 1235 | } |
michael@0 | 1236 | } |
michael@0 | 1237 | } |
michael@0 | 1238 | |
michael@0 | 1239 | extern "C" MFBT_API int |
michael@0 | 1240 | __wrap_epoll_ctl(int aEpollFd, int aOp, int aFd, struct epoll_event *aEvent) { |
michael@0 | 1241 | int rv = REAL(epoll_ctl)(aEpollFd, aOp, aFd, aEvent); |
michael@0 | 1242 | |
michael@0 | 1243 | if (!sIsNuwaProcess || rv == -1) { |
michael@0 | 1244 | return rv; |
michael@0 | 1245 | } |
michael@0 | 1246 | |
michael@0 | 1247 | EpollManager::EpollInfo *info = |
michael@0 | 1248 | EpollManager::Singleton()->FindEpollInfo(aEpollFd); |
michael@0 | 1249 | if (info == nullptr) { |
michael@0 | 1250 | abort(); |
michael@0 | 1251 | } |
michael@0 | 1252 | |
michael@0 | 1253 | switch(aOp) { |
michael@0 | 1254 | case EPOLL_CTL_ADD: |
michael@0 | 1255 | info->AddEvents(aFd, *aEvent); |
michael@0 | 1256 | break; |
michael@0 | 1257 | |
michael@0 | 1258 | case EPOLL_CTL_MOD: |
michael@0 | 1259 | info->ModifyEvents(aFd, *aEvent); |
michael@0 | 1260 | break; |
michael@0 | 1261 | |
michael@0 | 1262 | case EPOLL_CTL_DEL: |
michael@0 | 1263 | info->RemoveEvents(aFd); |
michael@0 | 1264 | break; |
michael@0 | 1265 | |
michael@0 | 1266 | default: |
michael@0 | 1267 | abort(); |
michael@0 | 1268 | } |
michael@0 | 1269 | |
michael@0 | 1270 | return rv; |
michael@0 | 1271 | } |
michael@0 | 1272 | |
michael@0 | 1273 | // XXX: thinker: Maybe, we should also track dup, dup2, and other functions. |
michael@0 | 1274 | extern "C" MFBT_API int |
michael@0 | 1275 | __wrap_close(int aFd) { |
michael@0 | 1276 | int rv = REAL(close)(aFd); |
michael@0 | 1277 | if (!sIsNuwaProcess || rv == -1) { |
michael@0 | 1278 | return rv; |
michael@0 | 1279 | } |
michael@0 | 1280 | |
michael@0 | 1281 | EpollManager::EpollInfo *info = |
michael@0 | 1282 | EpollManager::Singleton()->FindEpollInfo(aFd); |
michael@0 | 1283 | if (info) { |
michael@0 | 1284 | EpollManager::Singleton()->RemoveEpollInfo(aFd); |
michael@0 | 1285 | } |
michael@0 | 1286 | |
michael@0 | 1287 | return rv; |
michael@0 | 1288 | } |
michael@0 | 1289 | |
michael@0 | 1290 | extern "C" MFBT_API int |
michael@0 | 1291 | __wrap_tgkill(pid_t tgid, pid_t tid, int signalno) |
michael@0 | 1292 | { |
michael@0 | 1293 | if (sIsNuwaProcess) { |
michael@0 | 1294 | return tgkill(tgid, tid, signalno); |
michael@0 | 1295 | } |
michael@0 | 1296 | |
michael@0 | 1297 | if (tid == sMainThread.origNativeThreadID) { |
michael@0 | 1298 | return tgkill(tgid, sMainThread.recreatedNativeThreadID, signalno); |
michael@0 | 1299 | } |
michael@0 | 1300 | |
michael@0 | 1301 | thread_info_t *tinfo = (tid == sMainThread.origNativeThreadID ? |
michael@0 | 1302 | &sMainThread : |
michael@0 | 1303 | GetThreadInfo(tid)); |
michael@0 | 1304 | if (!tinfo) { |
michael@0 | 1305 | return tgkill(tgid, tid, signalno); |
michael@0 | 1306 | } |
michael@0 | 1307 | |
michael@0 | 1308 | return tgkill(tgid, tinfo->recreatedNativeThreadID, signalno); |
michael@0 | 1309 | } |
michael@0 | 1310 | |
/**
 * Entry point of a recreated thread: restores the thread's name and TLS,
 * then longjmp()s into the stack frame frozen at a freeze point.
 */
static void *
thread_recreate_startup(void *arg) {
  /*
   * Dark Art!! Never do the same unless you are ABSOLUTELY sure what you are
   * doing!
   *
   * The stack space collapsed by this frame had been reserved by
   * thread_create_startup(). And thread_create_startup() will
   * return immediately after returning from real start routine, so
   * all collapsed values does not affect the result.
   *
   * All outer frames of thread_create_startup() and
   * thread_recreate_startup() are equivalent, so
   * thread_create_startup() will return successfully.
   */
  thread_info_t *tinfo = (thread_info_t *)arg;

  // Restore the native thread name and the TLS values saved in the Nuwa
  // process before resuming the frozen stack.
  prctl(PR_SET_NAME, (unsigned long)&tinfo->nativeThreadName, 0, 0, 0);
  RestoreTLSInfo(tinfo);

  // retEnv is the point this thread returns to when its resumed stack
  // eventually finishes.
  if (setjmp(tinfo->retEnv) != 0) {
    return nullptr;
  }

  // longjmp() to recreate the stack on the new thread.
  longjmp(tinfo->jmpEnv, 1);

  // Never go here!
  abort();

  return nullptr;
}
michael@0 | 1343 | |
michael@0 | 1344 | /** |
michael@0 | 1345 | * Recreate the context given by tinfo at a new thread. |
michael@0 | 1346 | */ |
michael@0 | 1347 | static void |
michael@0 | 1348 | thread_recreate(thread_info_t *tinfo) { |
michael@0 | 1349 | pthread_t thread; |
michael@0 | 1350 | |
michael@0 | 1351 | // Note that the thread_recreate_startup() runs on the stack specified by |
michael@0 | 1352 | // tinfo. |
michael@0 | 1353 | pthread_create(&thread, &tinfo->threadAttr, thread_recreate_startup, tinfo); |
michael@0 | 1354 | } |
michael@0 | 1355 | |
/**
 * Recreate all threads in a process forked from an Nuwa process.
 *
 * Runs in the spawned (child) process: marks the process as non-Nuwa,
 * records the main thread's new IDs, runs registered constructors, then
 * recreates every frozen Nuwa-supporting thread one at a time using the
 * RECREATE_* protocol, and finally runs the registered final constructors.
 */
static void
RecreateThreads() {
  sIsNuwaProcess = false;
  sIsFreezing = false;

  sMainThread.recreatedThreadID = pthread_self();
  sMainThread.recreatedNativeThreadID = gettid();

  // Run registered constructors.
  for (std::vector<nuwa_construct_t>::iterator ctr = sConstructors.begin();
       ctr != sConstructors.end();
       ctr++) {
    (*ctr).construct((*ctr).arg);
  }
  sConstructors.clear();

  REAL(pthread_mutex_lock)(&sThreadCountLock);
  thread_info_t *tinfo = sAllThreads.getFirst();
  pthread_mutex_unlock(&sThreadCountLock);

  RECREATE_START();
  while (tinfo != nullptr) {
    if (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT) {
      RECREATE_BEFORE(tinfo);
      thread_recreate(tinfo);
      RECREATE_WAIT();
      if (tinfo->reacquireMutex) {
        // The recreated thread could not reacquire the mutex it was waiting
        // on (see __wrap_pthread_cond_wait()); acquire it on its behalf.
        REAL(pthread_mutex_lock)(tinfo->reacquireMutex);
      }
    } else if(!(tinfo->flags & TINFO_FLAG_NUWA_SKIP)) {
      // An unmarked thread is found other than the main thread.

      // All threads should be marked as one of SUPPORT or SKIP, or
      // abort the process to make sure all threads in the Nuwa
      // process are Nuwa-aware.
      abort();
    }

    tinfo = tinfo->getNext();
  }
  // Let all VIP threads fix up their mutexes before opening the gate for
  // everyone else.
  RECREATE_WAIT_ALL_VIP();
  RECREATE_OPEN_GATE();

  RECREATE_FINISH();

  // Run registered final constructors.
  for (std::vector<nuwa_construct_t>::iterator ctr = sFinalConstructors.begin();
       ctr != sFinalConstructors.end();
       ctr++) {
    (*ctr).construct((*ctr).arg);
  }
  sFinalConstructors.clear();
}
michael@0 | 1412 | |
michael@0 | 1413 | extern "C" { |
michael@0 | 1414 | |
/**
 * Recreate all epoll fds and restore status; include all events.
 *
 * epoll instances do not survive fork() usefully, so for each fd tracked by
 * EpollManager a fresh epoll instance is created, moved onto the original fd
 * number with dup2(), and re-armed with the recorded events.
 */
static void
RecreateEpollFds() {
  EpollManager *man = EpollManager::Singleton();

  for (EpollManager::const_iterator info_it = man->begin();
       info_it != man->end();
       info_it++) {
    int epollfd = info_it->first;
    const EpollManager::EpollInfo *info = &info_it->second;

    // Save the descriptor flags (FD_CLOEXEC) and status flags of the old fd
    // so they can be restored on the replacement.
    int fdflags = fcntl(epollfd, F_GETFD);
    if (fdflags == -1) {
      abort();
    }
    int fl = fcntl(epollfd, F_GETFL);
    if (fl == -1) {
      abort();
    }

    int newepollfd = REAL(epoll_create)(info->BackSize());
    if (newepollfd == -1) {
      abort();
    }
    // Close the stale instance first so dup2() can land the new instance on
    // the exact same fd number callers already hold.
    int rv = REAL(close)(epollfd);
    if (rv == -1) {
      abort();
    }
    rv = dup2(newepollfd, epollfd);
    if (rv == -1) {
      abort();
    }
    // The temporary fd is no longer needed once duplicated.
    rv = REAL(close)(newepollfd);
    if (rv == -1) {
      abort();
    }

    // Restore the flags captured above onto the recreated fd.
    rv = fcntl(epollfd, F_SETFD, fdflags);
    if (rv == -1) {
      abort();
    }
    rv = fcntl(epollfd, F_SETFL, fl);
    if (rv == -1) {
      abort();
    }

    // Re-register every (fd, events) pair that was armed on the old
    // instance.
    for (EpollManager::EpollInfo::const_iterator events_it = info->begin();
         events_it != info->end();
         events_it++) {
      int fd = events_it->first;
      epoll_event events;
      events = events_it->second;
      rv = REAL(epoll_ctl)(epollfd, EPOLL_CTL_ADD, fd, &events);
      if (rv == -1) {
        abort();
      }
    }
  }

  // Shutdown EpollManager. It won't be needed in the spawned process.
  EpollManager::Shutdown();
}
michael@0 | 1479 | |
michael@0 | 1480 | /** |
michael@0 | 1481 | * Fix IPC to make it ready. |
michael@0 | 1482 | * |
michael@0 | 1483 | * Especially, fix ContentChild. |
michael@0 | 1484 | */ |
michael@0 | 1485 | static void |
michael@0 | 1486 | ReplaceIPC(NuwaProtoFdInfo *aInfoList, int aInfoSize) { |
michael@0 | 1487 | int i; |
michael@0 | 1488 | int rv; |
michael@0 | 1489 | |
michael@0 | 1490 | for (i = 0; i < aInfoSize; i++) { |
michael@0 | 1491 | int fd = fcntl(aInfoList[i].originFd, F_GETFD); |
michael@0 | 1492 | if (fd == -1) { |
michael@0 | 1493 | abort(); |
michael@0 | 1494 | } |
michael@0 | 1495 | |
michael@0 | 1496 | int fl = fcntl(aInfoList[i].originFd, F_GETFL); |
michael@0 | 1497 | if (fl == -1) { |
michael@0 | 1498 | abort(); |
michael@0 | 1499 | } |
michael@0 | 1500 | |
michael@0 | 1501 | rv = dup2(aInfoList[i].newFds[NUWA_NEWFD_CHILD], aInfoList[i].originFd); |
michael@0 | 1502 | if (rv == -1) { |
michael@0 | 1503 | abort(); |
michael@0 | 1504 | } |
michael@0 | 1505 | |
michael@0 | 1506 | rv = fcntl(aInfoList[i].originFd, F_SETFD, fd); |
michael@0 | 1507 | if (rv == -1) { |
michael@0 | 1508 | abort(); |
michael@0 | 1509 | } |
michael@0 | 1510 | |
michael@0 | 1511 | rv = fcntl(aInfoList[i].originFd, F_SETFL, fl); |
michael@0 | 1512 | if (rv == -1) { |
michael@0 | 1513 | abort(); |
michael@0 | 1514 | } |
michael@0 | 1515 | } |
michael@0 | 1516 | } |
michael@0 | 1517 | |
michael@0 | 1518 | /** |
michael@0 | 1519 | * Add a new content process at the chrome process. |
michael@0 | 1520 | */ |
michael@0 | 1521 | static void |
michael@0 | 1522 | AddNewProcess(pid_t pid, NuwaProtoFdInfo *aInfoList, int aInfoSize) { |
michael@0 | 1523 | static bool (*AddNewIPCProcess)(pid_t, NuwaProtoFdInfo *, int) = nullptr; |
michael@0 | 1524 | |
michael@0 | 1525 | if (AddNewIPCProcess == nullptr) { |
michael@0 | 1526 | AddNewIPCProcess = (bool (*)(pid_t, NuwaProtoFdInfo *, int)) |
michael@0 | 1527 | dlsym(RTLD_DEFAULT, "AddNewIPCProcess"); |
michael@0 | 1528 | } |
michael@0 | 1529 | AddNewIPCProcess(pid, aInfoList, aInfoSize); |
michael@0 | 1530 | } |
michael@0 | 1531 | |
michael@0 | 1532 | static void |
michael@0 | 1533 | PrepareProtoSockets(NuwaProtoFdInfo *aInfoList, int aInfoSize) { |
michael@0 | 1534 | int i; |
michael@0 | 1535 | int rv; |
michael@0 | 1536 | |
michael@0 | 1537 | for (i = 0; i < aInfoSize; i++) { |
michael@0 | 1538 | rv = REAL(socketpair)(PF_UNIX, SOCK_STREAM, 0, aInfoList[i].newFds); |
michael@0 | 1539 | if (rv == -1) { |
michael@0 | 1540 | abort(); |
michael@0 | 1541 | } |
michael@0 | 1542 | } |
michael@0 | 1543 | } |
michael@0 | 1544 | |
michael@0 | 1545 | static void |
michael@0 | 1546 | CloseAllProtoSockets(NuwaProtoFdInfo *aInfoList, int aInfoSize) { |
michael@0 | 1547 | int i; |
michael@0 | 1548 | |
michael@0 | 1549 | for (i = 0; i < aInfoSize; i++) { |
michael@0 | 1550 | REAL(close)(aInfoList[i].newFds[0]); |
michael@0 | 1551 | REAL(close)(aInfoList[i].newFds[1]); |
michael@0 | 1552 | } |
michael@0 | 1553 | } |
michael@0 | 1554 | |
/**
 * Invoke the post-fork hook exported by the content layer. Runs in the child
 * right after fork(), before IPC and threads are rebuilt.
 */
static void
AfterForkHook()
{
  void (*AfterNuwaFork)();

  // This is defined in dom/ipc/ContentChild.cpp
  AfterNuwaFork = (void (*)())
    dlsym(RTLD_DEFAULT, "AfterNuwaFork");
  if (AfterNuwaFork == nullptr) {
    // Missing symbol means a broken build; abort instead of calling a null
    // function pointer.
    abort();
  }
  AfterNuwaFork();
}
michael@0 | 1565 | |
/**
 * Fork a new process that is ready for running IPC.
 *
 * Called on the main thread while the IPC thread waits in NuwaSpawnWait().
 * Runs the whole child-side fixup (signal fds, IPC fds, epoll fds, thread
 * recreation) before returning in the child.
 *
 * @return the PID of the new process.
 */
static int
ForkIPCProcess() {
  int pid;

  REAL(pthread_mutex_lock)(&sForkLock);

  PrepareProtoSockets(sProtoFdInfos, sProtoFdInfosSize);

  // sNuwaForking brackets the fork() call; presumably wrapped syscalls
  // elsewhere check it to behave specially during the fork window —
  // TODO confirm against the wrapper definitions earlier in this file.
  sNuwaForking = true;
  pid = fork();
  sNuwaForking = false;
  if (pid == -1) {
    abort();
  }

  if (pid > 0) {
    // in the parent
    AddNewProcess(pid, sProtoFdInfos, sProtoFdInfosSize);
    CloseAllProtoSockets(sProtoFdInfos, sProtoFdInfosSize);
  } else {
    // in the child
    if (getenv("MOZ_DEBUG_CHILD_PROCESS")) {
      printf("\n\nNUWA CHILDCHILDCHILDCHILD\n debug me @ %d\n\n", getpid());
      sleep(30);
    }
    AfterForkHook();
    ReplaceSignalFds();
    ReplaceIPC(sProtoFdInfos, sProtoFdInfosSize);
    RecreateEpollFds();
    RecreateThreads();
    CloseAllProtoSockets(sProtoFdInfos, sProtoFdInfosSize);
  }

  // Both parent and child release the IPC thread waiting in NuwaSpawnWait();
  // the child inherited sForkLock in the locked state across fork().
  sForkWaitCondChanged = true;
  pthread_cond_signal(&sForkWaitCond);
  pthread_mutex_unlock(&sForkLock);

  return pid;
}
michael@0 | 1610 | |
/**
 * Prepare for spawning a new process. Called on the IPC thread.
 *
 * Acquires sForkLock; the lock is intentionally NOT released here — it is
 * handed off to NuwaSpawnWait(), which releases it after the main thread's
 * fork completes.
 */
MFBT_API void
NuwaSpawnPrepare() {
  REAL(pthread_mutex_lock)(&sForkLock);

  sForkWaitCondChanged = false; // Will be modified on the main thread.
}
michael@0 | 1620 | |
/**
 * Let IPC thread wait until fork action on the main thread has completed.
 *
 * Pairs with NuwaSpawnPrepare(): enters already holding sForkLock, waits for
 * ForkIPCProcess() to set sForkWaitCondChanged, then releases the lock.
 */
MFBT_API void
NuwaSpawnWait() {
  // Loop guards against spurious wakeups of pthread_cond_wait().
  while (!sForkWaitCondChanged) {
    REAL(pthread_cond_wait)(&sForkWaitCond, &sForkLock);
  }
  pthread_mutex_unlock(&sForkLock);
}
michael@0 | 1631 | |
michael@0 | 1632 | /** |
michael@0 | 1633 | * Spawn a new process. If not ready for spawn (still waiting for some threads |
michael@0 | 1634 | * to freeze), postpone the spawn request until ready. |
michael@0 | 1635 | * |
michael@0 | 1636 | * @return the pid of the new process, or 0 if not ready. |
michael@0 | 1637 | */ |
michael@0 | 1638 | MFBT_API pid_t |
michael@0 | 1639 | NuwaSpawn() { |
michael@0 | 1640 | if (gettid() != getpid()) { |
michael@0 | 1641 | // Not the main thread. |
michael@0 | 1642 | abort(); |
michael@0 | 1643 | } |
michael@0 | 1644 | |
michael@0 | 1645 | pid_t pid = 0; |
michael@0 | 1646 | |
michael@0 | 1647 | if (sNuwaReady) { |
michael@0 | 1648 | pid = ForkIPCProcess(); |
michael@0 | 1649 | } else { |
michael@0 | 1650 | sNuwaPendingSpawn = true; |
michael@0 | 1651 | } |
michael@0 | 1652 | |
michael@0 | 1653 | return pid; |
michael@0 | 1654 | } |
michael@0 | 1655 | |
/**
 * Prepare to freeze the Nuwa-supporting threads.
 */
MFBT_API void
PrepareNuwaProcess() {
  sIsNuwaProcess = true;
  // Explicitly ignore SIGCHLD so we don't have to call waitpid() to reap
  // dead child processes.
  signal(SIGCHLD, SIG_IGN);

  // Make marked threads block in one freeze point.
  // This lock stays held by the Nuwa process; frozen threads that try to
  // acquire it in NuwaFreezeCurrentThread() park here indefinitely.
  REAL(pthread_mutex_lock)(&sThreadFreezeLock);

  // Populate sMainThread for mapping of tgkill.
  sMainThread.origThreadID = pthread_self();
  sMainThread.origNativeThreadID = gettid();
}
michael@0 | 1673 | |
// Make current process as a Nuwa process.
//
// Blocks until every registered thread is either frozen or marked as
// skipped, collects the top-level protocol fds, flags the process ready,
// and serves any spawn request that arrived while freezing.
MFBT_API void
MakeNuwaProcess() {
  void (*GetProtoFdInfos)(NuwaProtoFdInfo *, int, int *) = nullptr;
  void (*OnNuwaProcessReady)() = nullptr;
  sIsFreezing = true;

  REAL(pthread_mutex_lock)(&sThreadCountLock);

  // wait until all threads are frozen.
  while ((sThreadFreezeCount + sThreadSkipCount) != sThreadCount) {
    REAL(pthread_cond_wait)(&sThreadChangeCond, &sThreadCountLock);
  }

  // NOTE(review): dlsym() results below are used unchecked; a missing
  // symbol would crash on the call — confirm the symbols are always
  // exported in supported configurations.
  GetProtoFdInfos = (void (*)(NuwaProtoFdInfo *, int, int *))
    dlsym(RTLD_DEFAULT, "GetProtoFdInfos");
  GetProtoFdInfos(sProtoFdInfos, NUWA_TOPLEVEL_MAX, &sProtoFdInfosSize);

  sNuwaReady = true;

  pthread_mutex_unlock(&sThreadCountLock);

  OnNuwaProcessReady = (void (*)())dlsym(RTLD_DEFAULT, "OnNuwaProcessReady");
  OnNuwaProcessReady();

  // A spawn requested before readiness was deferred by NuwaSpawn();
  // honor it now.
  if (sNuwaPendingSpawn) {
    sNuwaPendingSpawn = false;
    NuwaSpawn();
  }
}
michael@0 | 1704 | |
michael@0 | 1705 | /** |
michael@0 | 1706 | * Mark the current thread as supporting Nuwa. The thread will be recreated in |
michael@0 | 1707 | * the spawned process. |
michael@0 | 1708 | */ |
michael@0 | 1709 | MFBT_API void |
michael@0 | 1710 | NuwaMarkCurrentThread(void (*recreate)(void *), void *arg) { |
michael@0 | 1711 | if (!sIsNuwaProcess) { |
michael@0 | 1712 | return; |
michael@0 | 1713 | } |
michael@0 | 1714 | |
michael@0 | 1715 | thread_info_t *tinfo = CUR_THREAD_INFO; |
michael@0 | 1716 | if (tinfo == nullptr) { |
michael@0 | 1717 | abort(); |
michael@0 | 1718 | } |
michael@0 | 1719 | |
michael@0 | 1720 | tinfo->flags |= TINFO_FLAG_NUWA_SUPPORT; |
michael@0 | 1721 | tinfo->recrFunc = recreate; |
michael@0 | 1722 | tinfo->recrArg = arg; |
michael@0 | 1723 | |
michael@0 | 1724 | // XXX Thread name might be set later than this call. If this is the case, we |
michael@0 | 1725 | // might need to delay getting the thread name. |
michael@0 | 1726 | prctl(PR_GET_NAME, (unsigned long)&tinfo->nativeThreadName, 0, 0, 0); |
michael@0 | 1727 | } |
michael@0 | 1728 | |
michael@0 | 1729 | /** |
michael@0 | 1730 | * Mark the current thread as not supporting Nuwa. Don't recreate this thread in |
michael@0 | 1731 | * the spawned process. |
michael@0 | 1732 | */ |
michael@0 | 1733 | MFBT_API void |
michael@0 | 1734 | NuwaSkipCurrentThread() { |
michael@0 | 1735 | if (!sIsNuwaProcess) return; |
michael@0 | 1736 | |
michael@0 | 1737 | thread_info_t *tinfo = CUR_THREAD_INFO; |
michael@0 | 1738 | if (tinfo == nullptr) { |
michael@0 | 1739 | abort(); |
michael@0 | 1740 | } |
michael@0 | 1741 | |
michael@0 | 1742 | if (!(tinfo->flags & TINFO_FLAG_NUWA_SKIP)) { |
michael@0 | 1743 | sThreadSkipCount++; |
michael@0 | 1744 | } |
michael@0 | 1745 | tinfo->flags |= TINFO_FLAG_NUWA_SKIP; |
michael@0 | 1746 | } |
michael@0 | 1747 | |
/**
 * Force to freeze the current thread.
 *
 * This method does not return in Nuwa process. It returns for the
 * recreated thread.
 */
MFBT_API void
NuwaFreezeCurrentThread() {
  thread_info_t *tinfo = CUR_THREAD_INFO;
  if (sIsNuwaProcess &&
      (tinfo = CUR_THREAD_INFO) &&
      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT)) {
    // setjmp() returns 0 on the initial call (Nuwa process), nonzero when
    // the recreated thread longjmp()s back to this point.
    if (!setjmp(tinfo->jmpEnv)) {
      REAL(pthread_mutex_lock)(&sThreadCountLock);
      SaveTLSInfo(tinfo);
      // Announce this thread as frozen so MakeNuwaProcess() can finish its
      // rendezvous count.
      sThreadFreezeCount++;
      pthread_cond_signal(&sThreadChangeCond);
      pthread_mutex_unlock(&sThreadCountLock);

      // Park forever: sThreadFreezeLock is held by PrepareNuwaProcess() and
      // never released in the Nuwa process.
      REAL(pthread_mutex_lock)(&sThreadFreezeLock);
    } else {
      // Recreated-thread path after longjmp() in the forked process.
      RECREATE_CONTINUE();
      RECREATE_GATE();
    }
  }
}
michael@0 | 1774 | |
michael@0 | 1775 | /** |
michael@0 | 1776 | * The caller of NuwaCheckpointCurrentThread() is at the line it wishes to |
michael@0 | 1777 | * return after the thread is recreated. |
michael@0 | 1778 | * |
michael@0 | 1779 | * The checkpointed thread will restart at the calling line of |
michael@0 | 1780 | * NuwaCheckpointCurrentThread(). This macro returns true in the Nuwa process |
michael@0 | 1781 | * and false on the recreated thread in the forked process. |
michael@0 | 1782 | * |
michael@0 | 1783 | * NuwaCheckpointCurrentThread() is implemented as a macro so we can place the |
michael@0 | 1784 | * setjmp() call in the calling method without changing its stack pointer. This |
michael@0 | 1785 | * is essential for not corrupting the stack when the calling thread continues |
michael@0 | 1786 | * to request the main thread for forking a new process. The caller of |
michael@0 | 1787 | * NuwaCheckpointCurrentThread() should not return before the process forking |
michael@0 | 1788 | * finishes. |
michael@0 | 1789 | * |
michael@0 | 1790 | * @return true for Nuwa process, and false in the forked process. |
michael@0 | 1791 | */ |
michael@0 | 1792 | MFBT_API jmp_buf* |
michael@0 | 1793 | NuwaCheckpointCurrentThread1() { |
michael@0 | 1794 | thread_info_t *tinfo = CUR_THREAD_INFO; |
michael@0 | 1795 | if (sIsNuwaProcess && |
michael@0 | 1796 | (tinfo = CUR_THREAD_INFO) && |
michael@0 | 1797 | (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT)) { |
michael@0 | 1798 | return &tinfo->jmpEnv; |
michael@0 | 1799 | } |
michael@0 | 1800 | abort(); |
michael@0 | 1801 | return nullptr; |
michael@0 | 1802 | } |
michael@0 | 1803 | |
// Second half of the NuwaCheckpointCurrentThread() macro: receives the
// setjmp() result taken in the caller's frame.
//
// setjmpCond == 0 means the initial pass in the Nuwa process: record the
// checkpoint (once per thread), bump the freeze count, and return true.
// A nonzero value means we arrived here via longjmp() on the recreated
// thread in the forked process: rejoin the recreation protocol and
// return false.
MFBT_API bool
NuwaCheckpointCurrentThread2(int setjmpCond) {
  thread_info_t *tinfo = CUR_THREAD_INFO;
  if (setjmpCond == 0) {
    REAL(pthread_mutex_lock)(&sThreadCountLock);
    // Only the first explicit checkpoint of a thread contributes to the
    // freeze count; later calls merely re-signal.
    if (!(tinfo->flags & TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT)) {
      tinfo->flags |= TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT;
      SaveTLSInfo(tinfo);
      sThreadFreezeCount++;
    }
    pthread_cond_signal(&sThreadChangeCond);
    pthread_mutex_unlock(&sThreadCountLock);
    return true;
  }
  RECREATE_CONTINUE();
  RECREATE_GATE();
  return false; // Recreated thread.
}
michael@0 | 1822 | |
michael@0 | 1823 | /** |
michael@0 | 1824 | * Register methods to be invoked before recreating threads in the spawned |
michael@0 | 1825 | * process. |
michael@0 | 1826 | */ |
michael@0 | 1827 | MFBT_API void |
michael@0 | 1828 | NuwaAddConstructor(void (*construct)(void *), void *arg) { |
michael@0 | 1829 | nuwa_construct_t ctr; |
michael@0 | 1830 | ctr.construct = construct; |
michael@0 | 1831 | ctr.arg = arg; |
michael@0 | 1832 | sConstructors.push_back(ctr); |
michael@0 | 1833 | } |
michael@0 | 1834 | |
michael@0 | 1835 | /** |
michael@0 | 1836 | * Register methods to be invoked after recreating threads in the spawned |
michael@0 | 1837 | * process. |
michael@0 | 1838 | */ |
michael@0 | 1839 | MFBT_API void |
michael@0 | 1840 | NuwaAddFinalConstructor(void (*construct)(void *), void *arg) { |
michael@0 | 1841 | nuwa_construct_t ctr; |
michael@0 | 1842 | ctr.construct = construct; |
michael@0 | 1843 | ctr.arg = arg; |
michael@0 | 1844 | sFinalConstructors.push_back(ctr); |
michael@0 | 1845 | } |
michael@0 | 1846 | |
michael@0 | 1847 | /** |
michael@0 | 1848 | * @return if the current process is the nuwa process. |
michael@0 | 1849 | */ |
michael@0 | 1850 | MFBT_API bool |
michael@0 | 1851 | IsNuwaProcess() { |
michael@0 | 1852 | return sIsNuwaProcess; |
michael@0 | 1853 | } |
michael@0 | 1854 | |
michael@0 | 1855 | /** |
michael@0 | 1856 | * @return if the nuwa process is ready for spawning new processes. |
michael@0 | 1857 | */ |
michael@0 | 1858 | MFBT_API bool |
michael@0 | 1859 | IsNuwaReady() { |
michael@0 | 1860 | return sNuwaReady; |
michael@0 | 1861 | } |
michael@0 | 1862 | |
michael@0 | 1863 | } // extern "C" |