mozglue/build/Nuwa.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/mozglue/build/Nuwa.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1863 @@
     1.4 +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.6 + * License, v. 2.0. If a copy of the MPL was not distributed with this file,
     1.7 + * You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.8 +
     1.9 +#include <map>
    1.10 +#include <memory>
    1.11 +
    1.12 +#include <dlfcn.h>
    1.13 +#include <errno.h>
    1.14 +#include <fcntl.h>
    1.15 +#include <setjmp.h>
    1.16 +#include <signal.h>
    1.17 +#include <poll.h>
    1.18 +#include <pthread.h>
    1.19 +#include <alloca.h>
    1.20 +#include <sys/epoll.h>
    1.21 +#include <sys/mman.h>
    1.22 +#include <sys/prctl.h>
    1.23 +#include <sys/types.h>
    1.24 +#include <sys/socket.h>
    1.25 +#include <sys/stat.h>
    1.26 +#include <sys/syscall.h>
    1.27 +#include <vector>
    1.28 +
    1.29 +#include "mozilla/LinkedList.h"
    1.30 +#include "Nuwa.h"
    1.31 +
    1.32 +using namespace mozilla;
    1.33 +
/**
 * Send signal `signalno` to thread `tid` in thread group (process) `tgid`
 * by issuing the tgkill system call directly (no libc wrapper is assumed
 * to exist on this target).
 */
extern "C" MFBT_API int tgkill(pid_t tgid, pid_t tid, int signalno) {
  return syscall(__NR_tgkill, tgid, tid, signalno);
}
    1.37 +
    1.38 +/**
    1.39 + * Provides the wrappers to a selected set of pthread and system-level functions
    1.40 + * as the basis for implementing Zygote-like preforking mechanism.
    1.41 + */
    1.42 +
    1.43 +/**
    1.44 + * Real functions for the wrappers.
    1.45 + */
extern "C" {
// Declarations of the `__real_` entry points that reach the original
// pthread/libc implementations behind the `__wrap_` interceptors below.
// NOTE(review): the `__real_`/`__wrap_` prefixes follow the GNU ld --wrap
// convention; the link flags are not visible in this file — confirm there.
int __real_pthread_create(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine) (void *),
                          void *arg);
int __real_pthread_key_create(pthread_key_t *key, void (*destructor)(void*));
int __real_pthread_key_delete(pthread_key_t key);
pthread_t __real_pthread_self();
int __real_pthread_join(pthread_t thread, void **retval);
int __real_epoll_wait(int epfd,
                      struct epoll_event *events,
                      int maxevents,
                      int timeout);
int __real_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mtx);
int __real_pthread_cond_timedwait(pthread_cond_t *cond,
                                  pthread_mutex_t *mtx,
                                  const struct timespec *abstime);
// Bionic-internal variant taking an explicit clock id.
int __real___pthread_cond_timedwait(pthread_cond_t *cond,
                                    pthread_mutex_t *mtx,
                                    const struct timespec *abstime,
                                    clockid_t clock);
int __real_pthread_mutex_lock(pthread_mutex_t *mtx);
int __real_poll(struct pollfd *fds, nfds_t nfds, int timeout);
int __real_epoll_create(int size);
int __real_socketpair(int domain, int type, int protocol, int sv[2]);
int __real_pipe2(int __pipedes[2], int flags);
int __real_pipe(int __pipedes[2]);
int __real_epoll_ctl(int aEpollFd, int aOp, int aFd, struct epoll_event *aEvent);
int __real_close(int aFd);
}
    1.76 +
    1.77 +#define REAL(s) __real_##s
    1.78 +
/**
 * A Nuwa process starts by preparing itself, then waits for all of its
 * threads to become frozen.  Once every thread is frozen, the process is
 * ready.
 */
    1.84 +static bool sIsNuwaProcess = false; // This process is a Nuwa process.
    1.85 +static bool sIsFreezing = false; // Waiting for all threads getting frozen.
    1.86 +static bool sNuwaReady = false;  // Nuwa process is ready.
    1.87 +static bool sNuwaPendingSpawn = false; // Are there any pending spawn requests?
    1.88 +static bool sNuwaForking = false;
    1.89 +
    1.90 +// Fds of transports of top level protocols.
    1.91 +static NuwaProtoFdInfo sProtoFdInfos[NUWA_TOPLEVEL_MAX];
    1.92 +static int sProtoFdInfosSize = 0;
    1.93 +
    1.94 +template <typename T>
    1.95 +struct LibcAllocator: public std::allocator<T>
    1.96 +{
    1.97 +  LibcAllocator()
    1.98 +  {
    1.99 +    void* libcHandle = dlopen("libc.so", RTLD_LAZY);
   1.100 +    mMallocImpl = reinterpret_cast<void*(*)(size_t)>(dlsym(libcHandle, "malloc"));
   1.101 +    mFreeImpl = reinterpret_cast<void(*)(void*)>(dlsym(libcHandle, "free"));
   1.102 +
   1.103 +    if (!(mMallocImpl && mFreeImpl)) {
   1.104 +      // libc should be available, or we'll deadlock in using TLSInfoList.
   1.105 +      abort();
   1.106 +    }
   1.107 +  }
   1.108 +
   1.109 +  inline typename std::allocator<T>::pointer
   1.110 +  allocate(typename std::allocator<T>::size_type n,
   1.111 +           const void * = 0)
   1.112 +  {
   1.113 +    return reinterpret_cast<T *>(mMallocImpl(sizeof(T) * n));
   1.114 +  }
   1.115 +
   1.116 +  inline void
   1.117 +  deallocate(typename std::allocator<T>::pointer p,
   1.118 +             typename std::allocator<T>::size_type n)
   1.119 +  {
   1.120 +    mFreeImpl(p);
   1.121 +  }
   1.122 +
   1.123 +  template<typename U>
   1.124 +  struct rebind
   1.125 +  {
   1.126 +    typedef LibcAllocator<U> other;
   1.127 +  };
   1.128 +private:
   1.129 +  void* (*mMallocImpl)(size_t);
   1.130 +  void (*mFreeImpl)(void*);
   1.131 +};
   1.132 +
   1.133 +/**
   1.134 + * TLSInfoList should use malloc() and free() in libc to avoid the deadlock that
   1.135 + * jemalloc calls into __wrap_pthread_mutex_lock() and then deadlocks while
   1.136 + * the same thread already acquired sThreadCountLock.
   1.137 + */
   1.138 +typedef std::vector<std::pair<pthread_key_t, void *>,
   1.139 +                    LibcAllocator<std::pair<pthread_key_t, void *> > >
   1.140 +TLSInfoList;
   1.141 +
   1.142 +/**
   1.143 + * Return the system's page size
   1.144 + */
   1.145 +static size_t getPageSize(void) {
   1.146 +#ifdef HAVE_GETPAGESIZE
   1.147 +  return getpagesize();
   1.148 +#elif defined(_SC_PAGESIZE)
   1.149 +  return sysconf(_SC_PAGESIZE);
   1.150 +#elif defined(PAGE_SIZE)
   1.151 +  return PAGE_SIZE;
   1.152 +#else
   1.153 +  #warning "Hard-coding page size to 4096 bytes"
   1.154 +  return 4096
   1.155 +#endif
   1.156 +}
   1.157 +
   1.158 +/**
   1.159 + * Align the pointer to the next page boundary unless it's already aligned
   1.160 + */
   1.161 +static uintptr_t ceilToPage(uintptr_t aPtr) {
   1.162 +  size_t pageSize = getPageSize();
   1.163 +
   1.164 +  return ((aPtr + pageSize - 1) / pageSize) * pageSize;
   1.165 +}
   1.166 +
/**
 * The stack size is chosen carefully so the frozen threads don't consume too
 * much memory in the Nuwa process. The threads shouldn't run deep recursive
 * methods or do large allocations on the stack to avoid stack overflow.
 */
   1.172 +#ifndef NUWA_STACK_SIZE
   1.173 +#define NUWA_STACK_SIZE (1024 * 128)
   1.174 +#endif
   1.175 +
   1.176 +#define NATIVE_THREAD_NAME_LENGTH 16
   1.177 +
// Per-thread bookkeeping record, linked into sAllThreads.  Created by
// thread_info_new() and destroyed by thread_info_cleanup().
struct thread_info : public mozilla::LinkedListElement<thread_info> {
  pthread_t origThreadID;       // pthread ID in the Nuwa process.
  pthread_t recreatedThreadID;  // pthread ID assigned after recreation in a spawned process.
  pthread_attr_t threadAttr;    // Attributes (incl. stack addr/size) saved in _thread_create_startup().
  // Jump buffers used to freeze and later rebuild the thread's context
  // (set at the freeze points; retEnv is the target of the longjmp() in
  // thread_create_startup()).
  jmp_buf jmpEnv;
  jmp_buf retEnv;

  int flags;                    // Combination of TINFO_FLAG_* bits.

  // Original entry point and argument passed to pthread_create().
  void *(*startupFunc)(void *arg);
  void *startupArg;

  // The thread specific function to recreate the new thread. It's executed
  // after the thread is recreated.
  void (*recrFunc)(void *arg);
  void *recrArg;

  // Snapshot of the thread's TLS key/value pairs (see SaveTLSInfo()).
  TLSInfoList tlsInfo;

  // Mutex to relock after recreation, presumably when the thread froze
  // while holding it — TODO confirm against the freeze-point code.
  pthread_mutex_t *reacquireMutex;
  void *stk;                    // Thread stack buffer; malloc()'d in thread_info_new(), free()'d on cleanup.

  pid_t origNativeThreadID;       // Kernel thread ID (gettid()) in the Nuwa process.
  pid_t recreatedNativeThreadID;  // Kernel thread ID after recreation.
  char nativeThreadName[NATIVE_THREAD_NAME_LENGTH];  // Saved native thread name.
};

typedef struct thread_info thread_info_t;
   1.206 +
   1.207 +static thread_info_t *sCurrentRecreatingThread = nullptr;
   1.208 +
   1.209 +/**
   1.210 + * This function runs the custom recreation function registered when calling
   1.211 + * NuwaMarkCurrentThread() after thread stack is restored.
   1.212 + */
   1.213 +static void
   1.214 +RunCustomRecreation() {
   1.215 +  thread_info_t *tinfo = sCurrentRecreatingThread;
   1.216 +  if (tinfo->recrFunc != nullptr) {
   1.217 +    tinfo->recrFunc(tinfo->recrArg);
   1.218 +  }
   1.219 +}
   1.220 +
   1.221 +/**
   1.222 + * Every thread should be marked as either TINFO_FLAG_NUWA_SUPPORT or
   1.223 + * TINFO_FLAG_NUWA_SKIP, or it means a potential error.  We force
   1.224 + * Gecko code to mark every single thread to make sure there are no accidents
   1.225 + * when recreating threads with Nuwa.
   1.226 + *
   1.227 + * Threads marked as TINFO_FLAG_NUWA_SUPPORT can be checkpointed explicitly, by
   1.228 + * calling NuwaCheckpointCurrentThread(), or implicitly when they call into wrapped
   1.229 + * functions like pthread_mutex_lock(), epoll_wait(), etc.
   1.230 + * TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT denotes the explicitly checkpointed thread.
   1.231 + */
   1.232 +#define TINFO_FLAG_NUWA_SUPPORT 0x1
   1.233 +#define TINFO_FLAG_NUWA_SKIP 0x2
   1.234 +#define TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT 0x4
   1.235 +
   1.236 +typedef struct nuwa_construct {
   1.237 +  void (*construct)(void *);
   1.238 +  void *arg;
   1.239 +} nuwa_construct_t;
   1.240 +
   1.241 +static std::vector<nuwa_construct_t> sConstructors;
   1.242 +static std::vector<nuwa_construct_t> sFinalConstructors;
   1.243 +
   1.244 +typedef std::map<pthread_key_t, void (*)(void *)> TLSKeySet;
   1.245 +static TLSKeySet sTLSKeys;
   1.246 +
   1.247 +/**
   1.248 + * This mutex is used to block the running threads and freeze their contexts.
   1.249 + * PrepareNuwaProcess() is the first one to acquire the lock. Further attempts
   1.250 + * to acquire this mutex (in the freeze point macros) will block and freeze the
   1.251 + * calling thread.
   1.252 + */
   1.253 +static pthread_mutex_t sThreadFreezeLock = PTHREAD_MUTEX_INITIALIZER;
   1.254 +
   1.255 +static thread_info_t sMainThread;
   1.256 +static LinkedList<thread_info_t> sAllThreads;
   1.257 +static int sThreadCount = 0;
   1.258 +static int sThreadFreezeCount = 0;
   1.259 +/**
   1.260 + * This mutex protects the access to thread info:
   1.261 + * sAllThreads, sThreadCount, sThreadFreezeCount, sRecreateVIPCount.
   1.262 + */
   1.263 +static pthread_mutex_t sThreadCountLock = PTHREAD_MUTEX_INITIALIZER;
   1.264 +/**
   1.265 + * This condition variable lets MakeNuwaProcess() wait until all recreated
   1.266 + * threads are frozen.
   1.267 + */
   1.268 +static pthread_cond_t sThreadChangeCond = PTHREAD_COND_INITIALIZER;
   1.269 +
   1.270 +/**
   1.271 + * This mutex and condition variable is used to serialize the fork requests
   1.272 + * from the parent process.
   1.273 + */
   1.274 +static pthread_mutex_t sForkLock = PTHREAD_MUTEX_INITIALIZER;
   1.275 +static pthread_cond_t sForkWaitCond = PTHREAD_COND_INITIALIZER;
   1.276 +
   1.277 +/**
   1.278 + * sForkWaitCondChanged will be reset to false on the IPC thread before
   1.279 + * and will be changed to true on the main thread to indicate that the condition
   1.280 + * that the IPC thread is waiting for has already changed.
   1.281 + */
   1.282 +static bool sForkWaitCondChanged = false;
   1.283 +
   1.284 +/**
   1.285 + * This mutex protects the access to sTLSKeys, which keeps track of existing
   1.286 + * TLS Keys.
   1.287 + */
   1.288 +static pthread_mutex_t sTLSKeyLock = PTHREAD_MUTEX_INITIALIZER;
   1.289 +static int sThreadSkipCount = 0;
   1.290 +
   1.291 +static thread_info_t *
   1.292 +GetThreadInfoInner(pthread_t threadID) {
   1.293 +  for (thread_info_t *tinfo = sAllThreads.getFirst();
   1.294 +       tinfo;
   1.295 +       tinfo = tinfo->getNext()) {
   1.296 +    if (pthread_equal(tinfo->origThreadID, threadID)) {
   1.297 +      return tinfo;
   1.298 +    }
   1.299 +  }
   1.300 +
   1.301 +  return nullptr;
   1.302 +}
   1.303 +
   1.304 +/**
   1.305 + * Get thread info using the specified thread ID.
   1.306 + *
   1.307 + * @return thread_info_t which has threadID == specified threadID
   1.308 + */
   1.309 +static thread_info_t *
   1.310 +GetThreadInfo(pthread_t threadID) {
   1.311 +  if (sIsNuwaProcess) {
   1.312 +    REAL(pthread_mutex_lock)(&sThreadCountLock);
   1.313 +  }
   1.314 +  thread_info_t *tinfo = GetThreadInfoInner(threadID);
   1.315 +  if (sIsNuwaProcess) {
   1.316 +    pthread_mutex_unlock(&sThreadCountLock);
   1.317 +  }
   1.318 +  return tinfo;
   1.319 +}
   1.320 +
   1.321 +/**
   1.322 + * Get thread info using the specified native thread ID.
   1.323 + *
   1.324 + * @return thread_info_t with nativeThreadID == specified threadID
   1.325 + */
   1.326 +static thread_info_t*
   1.327 +GetThreadInfo(pid_t threadID) {
   1.328 +  if (sIsNuwaProcess) {
   1.329 +    REAL(pthread_mutex_lock)(&sThreadCountLock);
   1.330 +  }
   1.331 +  thread_info_t *thrinfo = nullptr;
   1.332 +  for (thread_info_t *tinfo = sAllThreads.getFirst();
   1.333 +       tinfo;
   1.334 +       tinfo = tinfo->getNext()) {
   1.335 +    if (tinfo->origNativeThreadID == threadID) {
   1.336 +      thrinfo = tinfo;
   1.337 +      break;
   1.338 +    }
   1.339 +  }
   1.340 +  if (sIsNuwaProcess) {
   1.341 +    pthread_mutex_unlock(&sThreadCountLock);
   1.342 +  }
   1.343 +
   1.344 +  return thrinfo;
   1.345 +}
   1.346 +
   1.347 +#if !defined(HAVE_THREAD_TLS_KEYWORD)
   1.348 +/**
   1.349 + * Get thread info of the current thread.
   1.350 + *
   1.351 + * @return thread_info_t for the current thread.
   1.352 + */
   1.353 +static thread_info_t *
   1.354 +GetCurThreadInfo() {
   1.355 +  pthread_t threadID = REAL(pthread_self)();
   1.356 +  pthread_t thread_info_t::*threadIDptr =
   1.357 +      (sIsNuwaProcess ?
   1.358 +         &thread_info_t::origThreadID :
   1.359 +         &thread_info_t::recreatedThreadID);
   1.360 +
   1.361 +  REAL(pthread_mutex_lock)(&sThreadCountLock);
   1.362 +  thread_info_t *tinfo;
   1.363 +  for (tinfo = sAllThreads.getFirst();
   1.364 +       tinfo;
   1.365 +       tinfo = tinfo->getNext()) {
   1.366 +    if (pthread_equal(tinfo->*threadIDptr, threadID)) {
   1.367 +      break;
   1.368 +    }
   1.369 +  }
   1.370 +  pthread_mutex_unlock(&sThreadCountLock);
   1.371 +  return tinfo;
   1.372 +}
   1.373 +#define CUR_THREAD_INFO GetCurThreadInfo()
   1.374 +#define SET_THREAD_INFO(x) /* Nothing to do. */
   1.375 +#else
// Non-null only for threads created by pthread_create() in a Nuwa process.
// It is always nullptr for the main thread.
   1.378 +static __thread thread_info_t *sCurThreadInfo = nullptr;
   1.379 +#define CUR_THREAD_INFO sCurThreadInfo
   1.380 +#define SET_THREAD_INFO(x) do { sCurThreadInfo = (x); } while(0)
   1.381 +#endif  // HAVE_THREAD_TLS_KEYWORD
   1.382 +
   1.383 +/*
   1.384 + * Track all epoll fds and handling events.
   1.385 + */
   1.386 +class EpollManager {
   1.387 +public:
   1.388 +  class EpollInfo {
   1.389 +  public:
   1.390 +    typedef struct epoll_event Events;
   1.391 +    typedef std::map<int, Events> EpollEventsMap;
   1.392 +    typedef EpollEventsMap::iterator iterator;
   1.393 +    typedef EpollEventsMap::const_iterator const_iterator;
   1.394 +
   1.395 +    EpollInfo(): mBackSize(0) {}
   1.396 +    EpollInfo(int aBackSize): mBackSize(aBackSize) {}
   1.397 +    EpollInfo(const EpollInfo &aOther): mEvents(aOther.mEvents)
   1.398 +                                      , mBackSize(aOther.mBackSize) {
   1.399 +    }
   1.400 +    ~EpollInfo() {
   1.401 +      mEvents.clear();
   1.402 +    }
   1.403 +
   1.404 +    void AddEvents(int aFd, Events &aEvents) {
   1.405 +      std::pair<iterator, bool> pair =
   1.406 +        mEvents.insert(std::make_pair(aFd, aEvents));
   1.407 +      if (!pair.second) {
   1.408 +        abort();
   1.409 +      }
   1.410 +    }
   1.411 +
   1.412 +    void RemoveEvents(int aFd) {
   1.413 +      if (!mEvents.erase(aFd)) {
   1.414 +        abort();
   1.415 +      }
   1.416 +    }
   1.417 +
   1.418 +    void ModifyEvents(int aFd, Events &aEvents) {
   1.419 +      iterator it = mEvents.find(aFd);
   1.420 +      if (it == mEvents.end()) {
   1.421 +        abort();
   1.422 +      }
   1.423 +      it->second = aEvents;
   1.424 +    }
   1.425 +
   1.426 +    const Events &FindEvents(int aFd) const {
   1.427 +      const_iterator it = mEvents.find(aFd);
   1.428 +      if (it == mEvents.end()) {
   1.429 +        abort();
   1.430 +      }
   1.431 +      return it->second;
   1.432 +    }
   1.433 +
   1.434 +    int Size() const { return mEvents.size(); }
   1.435 +
   1.436 +    // Iterator with values of <fd, Events> pairs.
   1.437 +    const_iterator begin() const { return mEvents.begin(); }
   1.438 +    const_iterator end() const { return mEvents.end(); }
   1.439 +
   1.440 +    int BackSize() const { return mBackSize; }
   1.441 +
   1.442 +  private:
   1.443 +    EpollEventsMap mEvents;
   1.444 +    int mBackSize;
   1.445 +
   1.446 +    friend class EpollManager;
   1.447 +  };
   1.448 +
   1.449 +  typedef std::map<int, EpollInfo> EpollInfoMap;
   1.450 +  typedef EpollInfoMap::iterator iterator;
   1.451 +  typedef EpollInfoMap::const_iterator const_iterator;
   1.452 +
   1.453 +public:
   1.454 +  void AddEpollInfo(int aEpollFd, int aBackSize) {
   1.455 +    EpollInfo *oldinfo = FindEpollInfo(aEpollFd);
   1.456 +    if (oldinfo != nullptr) {
   1.457 +      abort();
   1.458 +    }
   1.459 +    mEpollFdsInfo[aEpollFd] = EpollInfo(aBackSize);
   1.460 +  }
   1.461 +
   1.462 +  EpollInfo *FindEpollInfo(int aEpollFd) {
   1.463 +    iterator it = mEpollFdsInfo.find(aEpollFd);
   1.464 +    if (it == mEpollFdsInfo.end()) {
   1.465 +      return nullptr;
   1.466 +    }
   1.467 +    return &it->second;
   1.468 +  }
   1.469 +
   1.470 +  void RemoveEpollInfo(int aEpollFd) {
   1.471 +    if (!mEpollFdsInfo.erase(aEpollFd)) {
   1.472 +      abort();
   1.473 +    }
   1.474 +  }
   1.475 +
   1.476 +  int Size() const { return mEpollFdsInfo.size(); }
   1.477 +
   1.478 +  // Iterator of <epollfd, EpollInfo> pairs.
   1.479 +  const_iterator begin() const { return mEpollFdsInfo.begin(); }
   1.480 +  const_iterator end() const { return mEpollFdsInfo.end(); }
   1.481 +
   1.482 +  static EpollManager *Singleton() {
   1.483 +    if (!sInstance) {
   1.484 +      sInstance = new EpollManager();
   1.485 +    }
   1.486 +    return sInstance;
   1.487 +  }
   1.488 +
   1.489 +  static void Shutdown() {
   1.490 +    if (!sInstance) {
   1.491 +      abort();
   1.492 +    }
   1.493 +
   1.494 +    delete sInstance;
   1.495 +    sInstance = nullptr;
   1.496 +  }
   1.497 +
   1.498 +private:
   1.499 +  static EpollManager *sInstance;
   1.500 +  ~EpollManager() {
   1.501 +    mEpollFdsInfo.clear();
   1.502 +  }
   1.503 +
   1.504 +  EpollInfoMap mEpollFdsInfo;
   1.505 +
   1.506 +  EpollManager() {}
   1.507 +};
   1.508 +
   1.509 +EpollManager* EpollManager::sInstance;
   1.510 +
/**
 * Allocate, initialize and register a thread_info_t for a thread that is
 * about to be created.  The caller fills in startupFunc/startupArg; the
 * record is torn down by thread_info_cleanup().
 */
static thread_info_t *
thread_info_new(void) {
  /* link tinfo to sAllThreads */
  thread_info_t *tinfo = new thread_info_t();
  tinfo->flags = 0;
  tinfo->recrFunc = nullptr;
  tinfo->recrArg = nullptr;
  tinfo->recreatedThreadID = 0;
  tinfo->recreatedNativeThreadID = 0;
  tinfo->reacquireMutex = nullptr;
  // One extra page so a whole page-aligned guard page fits inside the
  // allocation regardless of where malloc() placed it.
  // NOTE(review): malloc() result is not null-checked — a failure here
  // crashes later in pthread_attr_setstack/mprotect.
  tinfo->stk = malloc(NUWA_STACK_SIZE + getPageSize());

  // We use a smaller stack size. Add protection to stack overflow: mprotect()
  // stack top (the page at the lowest address) so we crash instead of corrupt
  // other content that is malloc()'d.
  uintptr_t pageGuard = ceilToPage((uintptr_t)tinfo->stk);
  mprotect((void*)pageGuard, getPageSize(), PROT_READ);

  pthread_attr_init(&tinfo->threadAttr);

  REAL(pthread_mutex_lock)(&sThreadCountLock);
  // Insert to the tail.
  sAllThreads.insertBack(tinfo);

  sThreadCount++;
  // Wake anyone (e.g. MakeNuwaProcess) waiting for the thread count to change.
  pthread_cond_signal(&sThreadChangeCond);
  pthread_mutex_unlock(&sThreadCountLock);

  return tinfo;
}
   1.541 +
   1.542 +static void
   1.543 +thread_info_cleanup(void *arg) {
   1.544 +  if (sNuwaForking) {
   1.545 +    // We shouldn't have any thread exiting when we are forking a new process.
   1.546 +    abort();
   1.547 +  }
   1.548 +
   1.549 +  thread_info_t *tinfo = (thread_info_t *)arg;
   1.550 +  pthread_attr_destroy(&tinfo->threadAttr);
   1.551 +
   1.552 +  REAL(pthread_mutex_lock)(&sThreadCountLock);
   1.553 +  /* unlink tinfo from sAllThreads */
   1.554 +  tinfo->remove();
   1.555 +
   1.556 +  sThreadCount--;
   1.557 +  pthread_cond_signal(&sThreadChangeCond);
   1.558 +  pthread_mutex_unlock(&sThreadCountLock);
   1.559 +
   1.560 +  free(tinfo->stk);
   1.561 +  delete tinfo;
   1.562 +}
   1.563 +
// Inner trampoline for threads created in the Nuwa process: records the
// thread's identity, installs the cleanup handler, then runs the user's
// entry point.
static void *
_thread_create_startup(void *arg) {
  thread_info_t *tinfo = (thread_info_t *)arg;
  void *r;

  // Save thread info; especially, stackaddr & stacksize.
  // Reuse the stack in the new thread.
  pthread_getattr_np(REAL(pthread_self)(), &tinfo->threadAttr);

  SET_THREAD_INFO(tinfo);
  tinfo->origThreadID = REAL(pthread_self)();
  tinfo->origNativeThreadID = gettid();

  pthread_cleanup_push(thread_info_cleanup, tinfo);

  r = tinfo->startupFunc(tinfo->startupArg);

  // In a spawned (non-Nuwa) process, return without popping the handler so
  // thread_info_cleanup() does not run and the thread_info_t stays alive.
  // NOTE(review): leaving the push/pop region via return is undefined per
  // POSIX; this relies on the macro implementation of the target libc —
  // confirm when changing toolchains.
  if (!sIsNuwaProcess) {
    return r;
  }

  // Nuwa process: pop and execute the cleanup handler.
  pthread_cleanup_pop(1);

  return r;
}
   1.589 +
   1.590 +// reserve STACK_RESERVED_SZ * 4 bytes for thread_recreate_startup().
   1.591 +#define STACK_RESERVED_SZ 64
   1.592 +#define STACK_SENTINEL(v) ((v)[0])
   1.593 +#define STACK_SENTINEL_VALUE(v) ((uint32_t)(v) ^ 0xdeadbeef)
   1.594 +
// Outer trampoline passed to pthread_create(); reserves a scratch region on
// the stack before entering _thread_create_startup().
static void *
thread_create_startup(void *arg) {
  /*
   * Dark Art!! Never try to do the same unless you are ABSOLUTELY sure of
   * what you are doing!
   *
   * This function is here for reserving stack space before calling
   * _thread_create_startup().  see also thread_create_startup();
   */
  void *r;
  volatile uint32_t reserved[STACK_RESERVED_SZ];

  // Reserve stack space: write a sentinel at the base of the reserved
  // array; if deeper frames ever overwrite it we know the reservation was
  // too small.
  STACK_SENTINEL(reserved) = STACK_SENTINEL_VALUE(reserved);

  r = _thread_create_startup(arg);

  // Check if the reservation is enough.
  if (STACK_SENTINEL(reserved) != STACK_SENTINEL_VALUE(reserved)) {
    abort();                    // Did not reserve enough stack space.
  }

  thread_info_t *tinfo = CUR_THREAD_INFO;
  if (!sIsNuwaProcess) {
    // Recreated thread in a spawned process: do not fall off this frame;
    // jump back to the context saved in retEnv (set elsewhere during
    // recreation).
    longjmp(tinfo->retEnv, 1);

    // Never go here!
    abort();
  }

  return r;
}
   1.627 +
   1.628 +extern "C" MFBT_API int
   1.629 +__wrap_pthread_create(pthread_t *thread,
   1.630 +                      const pthread_attr_t *attr,
   1.631 +                      void *(*start_routine) (void *),
   1.632 +                      void *arg) {
   1.633 +  if (!sIsNuwaProcess) {
   1.634 +    return REAL(pthread_create)(thread, attr, start_routine, arg);
   1.635 +  }
   1.636 +
   1.637 +  thread_info_t *tinfo = thread_info_new();
   1.638 +  tinfo->startupFunc = start_routine;
   1.639 +  tinfo->startupArg = arg;
   1.640 +  pthread_attr_setstack(&tinfo->threadAttr, tinfo->stk, NUWA_STACK_SIZE);
   1.641 +
   1.642 +  int rv = REAL(pthread_create)(thread,
   1.643 +                                &tinfo->threadAttr,
   1.644 +                                thread_create_startup,
   1.645 +                                tinfo);
   1.646 +  if (rv) {
   1.647 +    thread_info_cleanup(tinfo);
   1.648 +  } else {
   1.649 +    tinfo->origThreadID = *thread;
   1.650 +  }
   1.651 +
   1.652 +  return rv;
   1.653 +}
   1.654 +
   1.655 +// TLS related
   1.656 +
   1.657 +/**
   1.658 + * Iterates over the existing TLS keys and store the TLS data for the current
   1.659 + * thread in tinfo.
   1.660 + */
   1.661 +static void
   1.662 +SaveTLSInfo(thread_info_t *tinfo) {
   1.663 +  REAL(pthread_mutex_lock)(&sTLSKeyLock);
   1.664 +  tinfo->tlsInfo.clear();
   1.665 +  for (TLSKeySet::const_iterator it = sTLSKeys.begin();
   1.666 +       it != sTLSKeys.end();
   1.667 +       it++) {
   1.668 +    void *value = pthread_getspecific(it->first);
   1.669 +    if (value == nullptr) {
   1.670 +      continue;
   1.671 +    }
   1.672 +
   1.673 +    pthread_key_t key = it->first;
   1.674 +    tinfo->tlsInfo.push_back(TLSInfoList::value_type(key, value));
   1.675 +  }
   1.676 +  pthread_mutex_unlock(&sTLSKeyLock);
   1.677 +}
   1.678 +
   1.679 +/**
   1.680 + * Restores the TLS data for the current thread from tinfo.
   1.681 + */
   1.682 +static void
   1.683 +RestoreTLSInfo(thread_info_t *tinfo) {
   1.684 +  for (TLSInfoList::const_iterator it = tinfo->tlsInfo.begin();
   1.685 +       it != tinfo->tlsInfo.end();
   1.686 +       it++) {
   1.687 +    pthread_key_t key = it->first;
   1.688 +    const void *value = it->second;
   1.689 +    if (pthread_setspecific(key, value)) {
   1.690 +      abort();
   1.691 +    }
   1.692 +  }
   1.693 +
   1.694 +  SET_THREAD_INFO(tinfo);
   1.695 +  tinfo->recreatedThreadID = REAL(pthread_self)();
   1.696 +  tinfo->recreatedNativeThreadID = gettid();
   1.697 +}
   1.698 +
   1.699 +extern "C" MFBT_API int
   1.700 +__wrap_pthread_key_create(pthread_key_t *key, void (*destructor)(void*)) {
   1.701 +  int rv = REAL(pthread_key_create)(key, destructor);
   1.702 +  if (rv != 0) {
   1.703 +    return rv;
   1.704 +  }
   1.705 +  REAL(pthread_mutex_lock)(&sTLSKeyLock);
   1.706 +  sTLSKeys.insert(TLSKeySet::value_type(*key, destructor));
   1.707 +  pthread_mutex_unlock(&sTLSKeyLock);
   1.708 +  return 0;
   1.709 +}
   1.710 +
   1.711 +extern "C" MFBT_API int
   1.712 +__wrap_pthread_key_delete(pthread_key_t key) {
   1.713 +  if (!sIsNuwaProcess) {
   1.714 +    return REAL(pthread_key_delete)(key);
   1.715 +  }
   1.716 +  int rv = REAL(pthread_key_delete)(key);
   1.717 +  if (rv != 0) {
   1.718 +    return rv;
   1.719 +  }
   1.720 +  REAL(pthread_mutex_lock)(&sTLSKeyLock);
   1.721 +  sTLSKeys.erase(key);
   1.722 +  pthread_mutex_unlock(&sTLSKeyLock);
   1.723 +  return 0;
   1.724 +}
   1.725 +
   1.726 +extern "C" MFBT_API pthread_t
   1.727 +__wrap_pthread_self() {
   1.728 +  thread_info_t *tinfo = CUR_THREAD_INFO;
   1.729 +  if (tinfo) {
   1.730 +    // For recreated thread, masquerade as the original thread in the Nuwa
   1.731 +    // process.
   1.732 +    return tinfo->origThreadID;
   1.733 +  }
   1.734 +  return REAL(pthread_self)();
   1.735 +}
   1.736 +
   1.737 +extern "C" MFBT_API int
   1.738 +__wrap_pthread_join(pthread_t thread, void **retval) {
   1.739 +  thread_info_t *tinfo = GetThreadInfo(thread);
   1.740 +  if (tinfo == nullptr) {
   1.741 +    return REAL(pthread_join)(thread, retval);
   1.742 +  }
   1.743 +  // pthread_join() need to use the real thread ID in the spawned process.
   1.744 +  return REAL(pthread_join)(tinfo->recreatedThreadID, retval);
   1.745 +}
   1.746 +
   1.747 +/**
   1.748 + * The following are used to synchronize between the main thread and the
   1.749 + * thread being recreated. The main thread will wait until the thread is woken
   1.750 + * up from the freeze points or the blocking intercepted functions and then
   1.751 + * proceed to recreate the next frozen thread.
   1.752 + *
   1.753 + * In thread recreation, the main thread recreates the frozen threads one by
   1.754 + * one. The recreated threads will be "gated" until the main thread "opens the
   1.755 + * gate" to let them run freely as if they were created from scratch. The VIP
   1.756 + * threads gets the chance to run first after their thread stacks are recreated
   1.757 + * (using longjmp()) so they can adjust their contexts to a valid, consistent
   1.758 + * state. The threads frozen waiting for pthread condition variables are VIP
   1.759 + * threads. After woken up they need to run first to make the associated mutex
   1.760 + * in a valid state to maintain the semantics of the intercepted function calls
   1.761 + * (like pthread_cond_wait()).
   1.762 + */
   1.763 +
   1.764 +// Used to synchronize the main thread and the thread being recreated so that
   1.765 +// only one thread is allowed to be recreated at a time.
   1.766 +static pthread_mutex_t sRecreateWaitLock = PTHREAD_MUTEX_INITIALIZER;
   1.767 +// Used to block recreated threads until the main thread "opens the gate".
   1.768 +static pthread_mutex_t sRecreateGateLock = PTHREAD_MUTEX_INITIALIZER;
   1.769 +// Used to block the main thread from "opening the gate" until all VIP threads
   1.770 +// have been recreated.
   1.771 +static pthread_mutex_t sRecreateVIPGateLock = PTHREAD_MUTEX_INITIALIZER;
   1.772 +static pthread_cond_t sRecreateVIPCond = PTHREAD_COND_INITIALIZER;
   1.773 +static int sRecreateVIPCount = 0;
   1.774 +static int sRecreateGatePassed = 0;
   1.775 +
   1.776 +/**
   1.777 + * Thread recreation macros.
   1.778 + *
   1.779 + * The following macros are used in the forked process to synchronize and
   1.780 + * control the progress of thread recreation.
   1.781 + *
   1.782 + * 1. RECREATE_START() is first called in the beginning of thread
   1.783 + *    recreation to set sRecreateWaitLock and sRecreateGateLock in locked
   1.784 + *    state.
   1.785 + * 2. For each frozen thread:
   1.786 + *    2.1. RECREATE_BEFORE() to set the thread being recreated.
   1.787 + *    2.2. thread_recreate() to recreate the frozen thread.
   1.788 + *    2.3. Main thread calls RECREATE_WAIT() to wait on sRecreateWaitLock until
   1.789 + *         the thread is recreated from the freeze point and calls
   1.790 + *         RECREATE_CONTINUE() to release sRecreateWaitLock.
 *    2.4. Non-VIP threads are blocked on RECREATE_GATE(). VIP threads call
 *         RECREATE_PASS_VIP() to mark that a VIP thread is successfully
 *         recreated and then are blocked by calling RECREATE_GATE_VIP().
   1.794 + * 3. RECREATE_WAIT_ALL_VIP() to wait until all VIP threads passed, that is,
   1.795 + *    VIP threads already has their contexts (mainly pthread mutex) in a valid
   1.796 + *    state.
   1.797 + * 4. RECREATE_OPEN_GATE() to unblock threads blocked by sRecreateGateLock.
   1.798 + * 5. RECREATE_FINISH() to complete thread recreation.
   1.799 + */
/* Lock both recreation locks so recreated threads block until the main
 * thread releases each at the appropriate step of the protocol above. */
#define RECREATE_START()                          \
  do {                                            \
    REAL(pthread_mutex_lock)(&sRecreateWaitLock); \
    REAL(pthread_mutex_lock)(&sRecreateGateLock); \
  } while(0)
/* Publish which thread_info_t the next thread_recreate() call targets. */
#define RECREATE_BEFORE(info) do { sCurrentRecreatingThread = info; } while(0)
/* Main thread blocks here until the recreated thread runs
 * RECREATE_CONTINUE() to unlock sRecreateWaitLock. */
#define RECREATE_WAIT() REAL(pthread_mutex_lock)(&sRecreateWaitLock)
/* Called on the recreated thread: run per-thread custom recreation hooks,
 * then release the main thread blocked in RECREATE_WAIT(). */
#define RECREATE_CONTINUE() do {                \
    RunCustomRecreation();                      \
    pthread_mutex_unlock(&sRecreateWaitLock);   \
  } while(0)
/* Release sRecreateWaitLock held since RECREATE_START(). */
#define RECREATE_FINISH() pthread_mutex_unlock(&sRecreateWaitLock)
/* Non-VIP threads park here until RECREATE_OPEN_GATE(); each one counts
 * itself in sRecreateGatePassed once it gets through. */
#define RECREATE_GATE()                           \
  do {                                            \
    REAL(pthread_mutex_lock)(&sRecreateGateLock); \
    sRecreateGatePassed++;                        \
    pthread_mutex_unlock(&sRecreateGateLock);     \
  } while(0)
/* Unblock every thread parked on sRecreateGateLock. */
#define RECREATE_OPEN_GATE() pthread_mutex_unlock(&sRecreateGateLock)
/* VIP variant of the gate: block until the gate opens, without touching the
 * pass counter (the VIP thread already counted itself in
 * RECREATE_PASS_VIP()). */
#define RECREATE_GATE_VIP()                       \
  do {                                            \
    REAL(pthread_mutex_lock)(&sRecreateGateLock); \
    pthread_mutex_unlock(&sRecreateGateLock);     \
  } while(0)
/* Mark one VIP thread as successfully recreated and wake the main thread
 * waiting in RECREATE_WAIT_ALL_VIP(). */
#define RECREATE_PASS_VIP()                          \
  do {                                               \
    REAL(pthread_mutex_lock)(&sRecreateVIPGateLock); \
    sRecreateGatePassed++;                           \
    pthread_cond_signal(&sRecreateVIPCond);          \
    pthread_mutex_unlock(&sRecreateVIPGateLock);     \
  } while(0)
/* Main thread: wait until every VIP thread has called RECREATE_PASS_VIP(). */
#define RECREATE_WAIT_ALL_VIP()                        \
  do {                                                 \
    REAL(pthread_mutex_lock)(&sRecreateVIPGateLock);   \
    while(sRecreateGatePassed < sRecreateVIPCount) {   \
      REAL(pthread_cond_wait)(&sRecreateVIPCond,       \
                        &sRecreateVIPGateLock);        \
    }                                                  \
    pthread_mutex_unlock(&sRecreateVIPGateLock);       \
  } while(0)
   1.840 +
   1.841 +/**
   1.842 + * Thread freeze points. Note that the freeze points are implemented as macros
   1.843 + * so as not to garble the content of the stack after setjmp().
   1.844 + *
   1.845 + * In the nuwa process, when a thread supporting nuwa calls a wrapper
   1.846 + * function, freeze point 1 setjmp()s to save the state. We only allow the
   1.847 + * thread to be frozen in the wrapper functions. If thread freezing is not
   1.848 + * enabled yet, the wrapper functions act like their wrapped counterparts,
   1.849 + * except for the extra actions in the freeze points. If thread freezing is
   1.850 + * enabled, the thread will be frozen by calling one of the wrapper functions.
   1.851 + * The threads can be frozen in any of the following points:
   1.852 + *
   1.853 + * 1) Freeze point 1: this is the point where we setjmp() in the nuwa process
   1.854 + *    and longjmp() in the spawned process. If freezing is enabled, then the
   1.855 + *    current thread blocks by acquiring an already locked mutex,
   1.856 + *    sThreadFreezeLock.
   1.857 + * 2) The wrapped function: the function that might block waiting for some
   1.858 + *    resource or condition.
   1.859 + * 3) Freeze point 2: blocks the current thread by acquiring sThreadFreezeLock.
   1.860 + *    If freezing is not enabled then revert the counter change in freeze
   1.861 + *    point 1.
   1.862 + */
/*
 * First freeze point. Expands, inside the enclosing wrapper, into the locals
 * shared across the freeze/recreate cycle:
 * - freezeCountChg: whether sThreadFreezeCount was incremented here (later
 *   consumed by THREAD_FREEZE_POINT2()).
 * - recreated: set only on the longjmp() return path in the spawned process.
 * - freezePoint2: set by THREAD_FREEZE_POINT2(); volatile so its value
 *   survives the longjmp() back into this frame.
 * - tinfo: this thread's thread_info_t.
 * Only threads flagged TINFO_FLAG_NUWA_SUPPORT (without explicit
 * checkpoints) participate. The setjmp() branch saves TLS info, bumps the
 * frozen-thread count and, if freezing is enabled, blocks forever on
 * sThreadFreezeLock. The longjmp() branch (running on the recreated thread)
 * releases the main thread via RECREATE_CONTINUE() and then parks on
 * RECREATE_GATE() until the gate opens.
 */
#define THREAD_FREEZE_POINT1()                                 \
  bool freezeCountChg = false;                                 \
  bool recreated = false;                                      \
  volatile bool freezePoint2 = false;                          \
  thread_info_t *tinfo;                                        \
  if (sIsNuwaProcess &&                                        \
      (tinfo = CUR_THREAD_INFO) &&                             \
      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT) &&              \
      !(tinfo->flags & TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT)) { \
    if (!setjmp(tinfo->jmpEnv)) {                              \
      REAL(pthread_mutex_lock)(&sThreadCountLock);             \
      SaveTLSInfo(tinfo);                                      \
      sThreadFreezeCount++;                                    \
      freezeCountChg = true;                                   \
      pthread_cond_signal(&sThreadChangeCond);                 \
      pthread_mutex_unlock(&sThreadCountLock);                 \
                                                               \
      if (sIsFreezing) {                                       \
        REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
        /* Never return from the pthread_mutex_lock() call. */ \
        abort();                                               \
      }                                                        \
    } else {                                                   \
      RECREATE_CONTINUE();                                     \
      RECREATE_GATE();                                         \
      freezeCountChg = false;                                  \
      recreated = true;                                        \
    }                                                          \
  }
   1.892 +
/*
 * VIP variant of the first freeze point. Differences from
 * THREAD_FREEZE_POINT1():
 * - also declares freezePoint1 (volatile, survives longjmp()), set when the
 *   thread freezes here rather than inside/after the wrapped call;
 * - also counts this thread in sRecreateVIPCount;
 * - the longjmp() branch does NOT call RECREATE_CONTINUE()/RECREATE_GATE();
 *   the VIP wrappers (the pthread_cond_wait() family) drive the recreation
 *   protocol themselves after sorting out the mutex state.
 */
#define THREAD_FREEZE_POINT1_VIP()                             \
  bool freezeCountChg = false;                                 \
  bool recreated = false;                                      \
  volatile bool freezePoint1 = false;                          \
  volatile bool freezePoint2 = false;                          \
  thread_info_t *tinfo;                                        \
  if (sIsNuwaProcess &&                                        \
      (tinfo = CUR_THREAD_INFO) &&                             \
      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT) &&              \
      !(tinfo->flags & TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT)) { \
    if (!setjmp(tinfo->jmpEnv)) {                              \
      REAL(pthread_mutex_lock)(&sThreadCountLock);             \
      SaveTLSInfo(tinfo);                                      \
      sThreadFreezeCount++;                                    \
      sRecreateVIPCount++;                                     \
      freezeCountChg = true;                                   \
      pthread_cond_signal(&sThreadChangeCond);                 \
      pthread_mutex_unlock(&sThreadCountLock);                 \
                                                               \
      if (sIsFreezing) {                                       \
        freezePoint1 = true;                                   \
        REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
        /* Never return from the pthread_mutex_lock() call. */ \
        abort();                                               \
      }                                                        \
    } else {                                                   \
      freezeCountChg = false;                                  \
      recreated = true;                                        \
    }                                                          \
  }
   1.923 +
/*
 * Second freeze point, placed after the wrapped blocking call. If the Nuwa
 * process became ready while this thread was inside the wrapped call, record
 * freezePoint2 and freeze forever on sThreadFreezeLock (whatever the wrapped
 * call acquired is kept held). Otherwise undo the count bump made in
 * THREAD_FREEZE_POINT1().
 */
#define THREAD_FREEZE_POINT2()                               \
  if (freezeCountChg) {                                      \
    REAL(pthread_mutex_lock)(&sThreadCountLock);             \
    if (sNuwaReady && sIsNuwaProcess) {                      \
      pthread_mutex_unlock(&sThreadCountLock);               \
      freezePoint2 = true;                                   \
      REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
      /* Never return from the pthread_mutex_lock() call. */ \
      abort();                                               \
    }                                                        \
    sThreadFreezeCount--;                                    \
    pthread_cond_signal(&sThreadChangeCond);                 \
    pthread_mutex_unlock(&sThreadCountLock);                 \
  }
   1.938 +
/*
 * VIP variant of the second freeze point: identical to
 * THREAD_FREEZE_POINT2() except that it also reverts the sRecreateVIPCount
 * bump made in THREAD_FREEZE_POINT1_VIP().
 */
#define THREAD_FREEZE_POINT2_VIP()                           \
  if (freezeCountChg) {                                      \
    REAL(pthread_mutex_lock)(&sThreadCountLock);             \
    if (sNuwaReady && sIsNuwaProcess) {                      \
      pthread_mutex_unlock(&sThreadCountLock);               \
      freezePoint2 = true;                                   \
      REAL(pthread_mutex_lock)(&sThreadFreezeLock);          \
      /* Never return from the pthread_mutex_lock() call. */ \
      abort();                                               \
    }                                                        \
    sThreadFreezeCount--;                                    \
    sRecreateVIPCount--;                                     \
    pthread_cond_signal(&sThreadChangeCond);                 \
    pthread_mutex_unlock(&sThreadCountLock);                 \
  }
   1.954 +
   1.955 +/**
   1.956 + * Wrapping the blocking functions: epoll_wait(), poll(), pthread_mutex_lock(),
   1.957 + * pthread_cond_wait() and pthread_cond_timedwait():
   1.958 + *
   1.959 + * These functions are wrapped by the above freeze point macros. Once a new
   1.960 + * process is forked, the recreated thread will be blocked in one of the wrapper
   1.961 + * functions. When recreating the thread, we longjmp() to
   1.962 + * THREAD_FREEZE_POINT1() to recover the thread stack. Care must be taken to
   1.963 + * maintain the semantics of the wrapped function:
   1.964 + *
   1.965 + * - epoll_wait() and poll(): just retry the function.
   1.966 + * - pthread_mutex_lock(): don't lock if frozen at freeze point 2 (lock is
   1.967 + *   already acquired).
   1.968 + * - pthread_cond_wait() and pthread_cond_timedwait(): if the thread is frozen
   1.969 + *   waiting the condition variable, the mutex is already released, we need to
   1.970 + *   reacquire the mutex before calling the wrapped function again so the mutex
   1.971 + *   will be in a valid state.
   1.972 + */
   1.973 +
/**
 * Wrapper of epoll_wait() bracketed by the freeze points. On the recreation
 * path the macros fall through and the wait is simply retried with the same
 * arguments.
 */
extern "C" MFBT_API int
__wrap_epoll_wait(int epfd,
                  struct epoll_event *events,
                  int maxevents,
                  int timeout) {
  int rv;

  THREAD_FREEZE_POINT1();
  rv = REAL(epoll_wait)(epfd, events, maxevents, timeout);
  THREAD_FREEZE_POINT2();

  return rv;
}
   1.987 +
/**
 * Wrapper of pthread_cond_wait() with the VIP freeze points. VIP threads are
 * recreated before non-VIP ones so their mutex state is valid first.
 */
extern "C" MFBT_API int
__wrap_pthread_cond_wait(pthread_cond_t *cond,
                         pthread_mutex_t *mtx) {
  int rv = 0;

  THREAD_FREEZE_POINT1_VIP();
  if (freezePoint2) {
    // Frozen at freeze point 2: the wait had already returned and mtx was
    // reacquired in the Nuwa process, so just complete the recreation
    // protocol and report success (rv is still 0).
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
    RECREATE_GATE_VIP();
    return rv;
  }
  if (recreated && mtx) {
    if (!freezePoint1 && pthread_mutex_trylock(mtx)) {
      // The thread was frozen in pthread_cond_wait() after releasing mtx in the
      // Nuwa process. In recreating this thread, we failed to reacquire mtx
      // with the pthread_mutex_trylock() call, that is, mtx was acquired by
      // another thread. Because of this, we need the main thread's help to
      // reacquire mtx so that it will be in a valid state.
      tinfo->reacquireMutex = mtx;
    }
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
  }
  rv = REAL(pthread_cond_wait)(cond, mtx);
  if (recreated && mtx) {
    // We still need to be gated as not to acquire another mutex associated with
    // another VIP thread and interfere with it.
    RECREATE_GATE_VIP();
  }
  THREAD_FREEZE_POINT2_VIP();

  return rv;
}
  1.1022 +
/**
 * Wrapper of pthread_cond_timedwait() with the VIP freeze points; same
 * recreation logic as __wrap_pthread_cond_wait().
 */
extern "C" MFBT_API int
__wrap_pthread_cond_timedwait(pthread_cond_t *cond,
                              pthread_mutex_t *mtx,
                              const struct timespec *abstime) {
  int rv = 0;

  THREAD_FREEZE_POINT1_VIP();
  if (freezePoint2) {
    // Frozen after the wait returned: mtx is already held, just finish the
    // recreation protocol and report success.
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
    RECREATE_GATE_VIP();
    return rv;
  }
  if (recreated && mtx) {
    if (!freezePoint1 && pthread_mutex_trylock(mtx)) {
      // mtx is held by someone else; ask the main thread to reacquire it on
      // our behalf (see __wrap_pthread_cond_wait for details).
      tinfo->reacquireMutex = mtx;
    }
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
  }
  rv = REAL(pthread_cond_timedwait)(cond, mtx, abstime);
  if (recreated && mtx) {
    // Stay gated so we don't steal a mutex belonging to another VIP thread.
    RECREATE_GATE_VIP();
  }
  THREAD_FREEZE_POINT2_VIP();

  return rv;
}
  1.1051 +
  1.1052 +extern "C" int __pthread_cond_timedwait(pthread_cond_t *cond,
  1.1053 +                                        pthread_mutex_t *mtx,
  1.1054 +                                        const struct timespec *abstime,
  1.1055 +                                        clockid_t clock);
  1.1056 +
/**
 * Wrapper of the bionic-internal __pthread_cond_timedwait() (takes an
 * explicit clock id); same recreation logic as __wrap_pthread_cond_wait().
 */
extern "C" MFBT_API int
__wrap___pthread_cond_timedwait(pthread_cond_t *cond,
                                pthread_mutex_t *mtx,
                                const struct timespec *abstime,
                                clockid_t clock) {
  int rv = 0;

  THREAD_FREEZE_POINT1_VIP();
  if (freezePoint2) {
    // Frozen after the wait returned: mtx is already held, just finish the
    // recreation protocol and report success.
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
    RECREATE_GATE_VIP();
    return rv;
  }
  if (recreated && mtx) {
    if (!freezePoint1 && pthread_mutex_trylock(mtx)) {
      // mtx is held by someone else; ask the main thread to reacquire it on
      // our behalf (see __wrap_pthread_cond_wait for details).
      tinfo->reacquireMutex = mtx;
    }
    RECREATE_CONTINUE();
    RECREATE_PASS_VIP();
  }
  rv = REAL(__pthread_cond_timedwait)(cond, mtx, abstime, clock);
  if (recreated && mtx) {
    // Stay gated so we don't steal a mutex belonging to another VIP thread.
    RECREATE_GATE_VIP();
  }
  THREAD_FREEZE_POINT2_VIP();

  return rv;
}
  1.1086 +
/**
 * Wrapper of pthread_mutex_lock(). If this thread was frozen at freeze
 * point 2, the lock had already been acquired in the Nuwa process, so the
 * recreated thread must not lock it again.
 */
extern "C" MFBT_API int
__wrap_pthread_mutex_lock(pthread_mutex_t *mtx) {
  int rv = 0;

  THREAD_FREEZE_POINT1();
  if (freezePoint2) {
    return rv;
  }
  rv = REAL(pthread_mutex_lock)(mtx);
  THREAD_FREEZE_POINT2();

  return rv;
}
  1.1100 +
/**
 * Wrapper of poll() bracketed by the freeze points; on recreation the poll
 * is simply retried with the same arguments.
 */
extern "C" MFBT_API int
__wrap_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
  int rv;

  THREAD_FREEZE_POINT1();
  rv = REAL(poll)(fds, nfds, timeout);
  THREAD_FREEZE_POINT2();

  return rv;
}
  1.1111 +
  1.1112 +extern "C" MFBT_API int
  1.1113 +__wrap_epoll_create(int size) {
  1.1114 +  int epollfd = REAL(epoll_create)(size);
  1.1115 +
  1.1116 +  if (!sIsNuwaProcess) {
  1.1117 +    return epollfd;
  1.1118 +  }
  1.1119 +
  1.1120 +  if (epollfd >= 0) {
  1.1121 +    EpollManager::Singleton()->AddEpollInfo(epollfd, size);
  1.1122 +  }
  1.1123 +
  1.1124 +  return epollfd;
  1.1125 +}
  1.1126 +
  1.1127 +/**
  1.1128 + * Wrapping the functions to create file descriptor pairs. In the child process
  1.1129 + * FD pairs are created for intra-process signaling. The generation of FD pairs
  1.1130 + * need to be tracked in the nuwa process so they can be recreated in the
  1.1131 + * spawned process.
  1.1132 + */
// Records one tracked FD pair together with the arguments needed to
// recreate it in the spawned process.
struct FdPairInfo {
  enum {
    kPipe,        // created by pipe()/pipe2()
    kSocketpair   // created by socketpair()
  } call;

  int FDs[2];     // original descriptor numbers in the Nuwa process
  int flags;      // pipe2() flags (meaningful for kPipe only)
  int domain;     // socketpair() arguments (meaningful for kSocketpair only)
  int type;
  int protocol;
};
  1.1145 +
/**
 * Protects the access to sSignalFds.
 */
static pthread_mutex_t  sSignalFdLock = PTHREAD_MUTEX_INITIALIZER;
// FD pairs created in the Nuwa process; ReplaceSignalFds() recreates them in
// the spawned process.
static std::vector<FdPairInfo> sSignalFds;
  1.1151 +
  1.1152 +extern "C" MFBT_API int
  1.1153 +__wrap_socketpair(int domain, int type, int protocol, int sv[2])
  1.1154 +{
  1.1155 +  int rv = REAL(socketpair)(domain, type, protocol, sv);
  1.1156 +
  1.1157 +  if (!sIsNuwaProcess || rv < 0) {
  1.1158 +    return rv;
  1.1159 +  }
  1.1160 +
  1.1161 +  REAL(pthread_mutex_lock)(&sSignalFdLock);
  1.1162 +  FdPairInfo signalFd;
  1.1163 +  signalFd.call = FdPairInfo::kSocketpair;
  1.1164 +  signalFd.FDs[0] = sv[0];
  1.1165 +  signalFd.FDs[1] = sv[1];
  1.1166 +  signalFd.domain = domain;
  1.1167 +  signalFd.type = type;
  1.1168 +  signalFd.protocol = protocol;
  1.1169 +
  1.1170 +  sSignalFds.push_back(signalFd);
  1.1171 +  pthread_mutex_unlock(&sSignalFdLock);
  1.1172 +
  1.1173 +  return rv;
  1.1174 +}
  1.1175 +
  1.1176 +extern "C" MFBT_API int
  1.1177 +__wrap_pipe2(int __pipedes[2], int flags)
  1.1178 +{
  1.1179 +  int rv = REAL(pipe2)(__pipedes, flags);
  1.1180 +  if (!sIsNuwaProcess || rv < 0) {
  1.1181 +    return rv;
  1.1182 +  }
  1.1183 +
  1.1184 +  REAL(pthread_mutex_lock)(&sSignalFdLock);
  1.1185 +  FdPairInfo signalFd;
  1.1186 +  signalFd.call = FdPairInfo::kPipe;
  1.1187 +  signalFd.FDs[0] = __pipedes[0];
  1.1188 +  signalFd.FDs[1] = __pipedes[1];
  1.1189 +  signalFd.flags = flags;
  1.1190 +  sSignalFds.push_back(signalFd);
  1.1191 +  pthread_mutex_unlock(&sSignalFdLock);
  1.1192 +  return rv;
  1.1193 +}
  1.1194 +
/**
 * Wrapper of pipe(): forwards to __wrap_pipe2() with no flags so the pair is
 * tracked the same way.
 */
extern "C" MFBT_API int
__wrap_pipe(int __pipedes[2])
{
  return __wrap_pipe2(__pipedes, 0);
}
  1.1200 +
  1.1201 +static void
  1.1202 +DupeSingleFd(int newFd, int origFd)
  1.1203 +{
  1.1204 +  struct stat sb;
  1.1205 +  if (fstat(origFd, &sb)) {
  1.1206 +    // Maybe the original FD is closed.
  1.1207 +    return;
  1.1208 +  }
  1.1209 +  int fd = fcntl(origFd, F_GETFD);
  1.1210 +  int fl = fcntl(origFd, F_GETFL);
  1.1211 +  dup2(newFd, origFd);
  1.1212 +  fcntl(origFd, F_SETFD, fd);
  1.1213 +  fcntl(origFd, F_SETFL, fl);
  1.1214 +  REAL(close)(newFd);
  1.1215 +}
  1.1216 +
  1.1217 +extern "C" MFBT_API void
  1.1218 +ReplaceSignalFds()
  1.1219 +{
  1.1220 +  for (std::vector<FdPairInfo>::iterator it = sSignalFds.begin();
  1.1221 +       it < sSignalFds.end(); ++it) {
  1.1222 +    int fds[2];
  1.1223 +    int rc = 0;
  1.1224 +    switch (it->call) {
  1.1225 +    case FdPairInfo::kPipe:
  1.1226 +      rc = REAL(pipe2)(fds, it->flags);
  1.1227 +      break;
  1.1228 +    case FdPairInfo::kSocketpair:
  1.1229 +      rc = REAL(socketpair)(it->domain, it->type, it->protocol, fds);
  1.1230 +      break;
  1.1231 +    default:
  1.1232 +      continue;
  1.1233 +    }
  1.1234 +
  1.1235 +    if (rc == 0) {
  1.1236 +      DupeSingleFd(fds[0], it->FDs[0]);
  1.1237 +      DupeSingleFd(fds[1], it->FDs[1]);
  1.1238 +    }
  1.1239 +  }
  1.1240 +}
  1.1241 +
  1.1242 +extern "C" MFBT_API int
  1.1243 +__wrap_epoll_ctl(int aEpollFd, int aOp, int aFd, struct epoll_event *aEvent) {
  1.1244 +  int rv = REAL(epoll_ctl)(aEpollFd, aOp, aFd, aEvent);
  1.1245 +
  1.1246 +  if (!sIsNuwaProcess || rv == -1) {
  1.1247 +    return rv;
  1.1248 +  }
  1.1249 +
  1.1250 +  EpollManager::EpollInfo *info =
  1.1251 +    EpollManager::Singleton()->FindEpollInfo(aEpollFd);
  1.1252 +  if (info == nullptr) {
  1.1253 +    abort();
  1.1254 +  }
  1.1255 +
  1.1256 +  switch(aOp) {
  1.1257 +  case EPOLL_CTL_ADD:
  1.1258 +    info->AddEvents(aFd, *aEvent);
  1.1259 +    break;
  1.1260 +
  1.1261 +  case EPOLL_CTL_MOD:
  1.1262 +    info->ModifyEvents(aFd, *aEvent);
  1.1263 +    break;
  1.1264 +
  1.1265 +  case EPOLL_CTL_DEL:
  1.1266 +    info->RemoveEvents(aFd);
  1.1267 +    break;
  1.1268 +
  1.1269 +  default:
  1.1270 +    abort();
  1.1271 +  }
  1.1272 +
  1.1273 +  return rv;
  1.1274 +}
  1.1275 +
  1.1276 +// XXX: thinker: Maybe, we should also track dup, dup2, and other functions.
  1.1277 +extern "C" MFBT_API int
  1.1278 +__wrap_close(int aFd) {
  1.1279 +  int rv = REAL(close)(aFd);
  1.1280 +  if (!sIsNuwaProcess || rv == -1) {
  1.1281 +    return rv;
  1.1282 +  }
  1.1283 +
  1.1284 +  EpollManager::EpollInfo *info =
  1.1285 +    EpollManager::Singleton()->FindEpollInfo(aFd);
  1.1286 +  if (info) {
  1.1287 +    EpollManager::Singleton()->RemoveEpollInfo(aFd);
  1.1288 +  }
  1.1289 +
  1.1290 +  return rv;
  1.1291 +}
  1.1292 +
  1.1293 +extern "C" MFBT_API int
  1.1294 +__wrap_tgkill(pid_t tgid, pid_t tid, int signalno)
  1.1295 +{
  1.1296 +  if (sIsNuwaProcess) {
  1.1297 +    return tgkill(tgid, tid, signalno);
  1.1298 +  }
  1.1299 +
  1.1300 +  if (tid == sMainThread.origNativeThreadID) {
  1.1301 +    return tgkill(tgid, sMainThread.recreatedNativeThreadID, signalno);
  1.1302 +  }
  1.1303 +
  1.1304 +  thread_info_t *tinfo = (tid == sMainThread.origNativeThreadID ?
  1.1305 +      &sMainThread :
  1.1306 +      GetThreadInfo(tid));
  1.1307 +  if (!tinfo) {
  1.1308 +    return tgkill(tgid, tid, signalno);
  1.1309 +  }
  1.1310 +
  1.1311 +  return tgkill(tgid, tinfo->recreatedNativeThreadID, signalno);
  1.1312 +}
  1.1313 +
/**
 * Entry point of a recreated thread: restore the frozen thread's identity
 * and TLS, then longjmp() into the stack frame saved at its freeze point.
 */
static void *
thread_recreate_startup(void *arg) {
  /*
   * Dark Art!! Never do the same unless you are ABSOLUTELY sure what you are
   * doing!
   *
   * The stack space collapsed by this frame had been reserved by
   * thread_create_startup().  And thread_create_startup() will
   * return immediately after returning from real start routine, so
   * all collapsed values does not affect the result.
   *
   * All outer frames of thread_create_startup() and
   * thread_recreate_startup() are equivalent, so
   * thread_create_startup() will return successfully.
   */
  thread_info_t *tinfo = (thread_info_t *)arg;

  // Restore the native thread name and the TLS slots saved by SaveTLSInfo()
  // when the thread froze in the Nuwa process.
  prctl(PR_SET_NAME, (unsigned long)&tinfo->nativeThreadName, 0, 0, 0);
  RestoreTLSInfo(tinfo);

  // retEnv is presumably longjmp()ed back to when the recreated routine
  // finishes (see thread_create_startup(), not in view) — that path ends the
  // thread normally here.
  if (setjmp(tinfo->retEnv) != 0) {
    return nullptr;
  }

  // longjmp() to recreate the stack on the new thread.
  longjmp(tinfo->jmpEnv, 1);

  // Never go here!
  abort();

  return nullptr;
}
  1.1346 +
  1.1347 +/**
  1.1348 + * Recreate the context given by tinfo at a new thread.
  1.1349 + */
  1.1350 +static void
  1.1351 +thread_recreate(thread_info_t *tinfo) {
  1.1352 +  pthread_t thread;
  1.1353 +
  1.1354 +  // Note that the thread_recreate_startup() runs on the stack specified by
  1.1355 +  // tinfo.
  1.1356 +  pthread_create(&thread, &tinfo->threadAttr, thread_recreate_startup, tinfo);
  1.1357 +}
  1.1358 +
  1.1359 +/**
  1.1360 + * Recreate all threads in a process forked from an Nuwa process.
  1.1361 + */
static void
RecreateThreads() {
  // From here on this is the spawned process: turn off Nuwa bookkeeping and
  // the freezing checks inside the wrappers.
  sIsNuwaProcess = false;
  sIsFreezing = false;

  sMainThread.recreatedThreadID = pthread_self();
  sMainThread.recreatedNativeThreadID = gettid();

  // Run registered constructors.
  for (std::vector<nuwa_construct_t>::iterator ctr = sConstructors.begin();
       ctr != sConstructors.end();
       ctr++) {
    (*ctr).construct((*ctr).arg);
  }
  sConstructors.clear();

  REAL(pthread_mutex_lock)(&sThreadCountLock);
  thread_info_t *tinfo = sAllThreads.getFirst();
  pthread_mutex_unlock(&sThreadCountLock);

  // Recreate the frozen threads one at a time; the RECREATE_* protocol keeps
  // only one of them runnable until its context is valid.
  // NOTE(review): the list is walked after dropping sThreadCountLock —
  // presumably safe because every other thread is frozen here; confirm no
  // thread can be added/removed during recreation.
  RECREATE_START();
  while (tinfo != nullptr) {
    if (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT) {
      RECREATE_BEFORE(tinfo);
      thread_recreate(tinfo);
      RECREATE_WAIT();
      if (tinfo->reacquireMutex) {
        // The recreated thread could not trylock the mutex it was waiting on
        // (see __wrap_pthread_cond_wait); acquire it on its behalf so its
        // context becomes valid.
        REAL(pthread_mutex_lock)(tinfo->reacquireMutex);
      }
    } else if(!(tinfo->flags & TINFO_FLAG_NUWA_SKIP)) {
      // An unmarked thread is found other than the main thread.

      // All threads should be marked as one of SUPPORT or SKIP, or
      // abort the process to make sure all threads in the Nuwa
      // process are Nuwa-aware.
      abort();
    }

    tinfo = tinfo->getNext();
  }
  // Wait for all VIP threads to validate their mutex state, then release the
  // non-VIP threads and finish the protocol.
  RECREATE_WAIT_ALL_VIP();
  RECREATE_OPEN_GATE();

  RECREATE_FINISH();

  // Run registered final constructors.
  for (std::vector<nuwa_construct_t>::iterator ctr = sFinalConstructors.begin();
       ctr != sFinalConstructors.end();
       ctr++) {
    (*ctr).construct((*ctr).arg);
  }
  sFinalConstructors.clear();
}
  1.1415 +
  1.1416 +extern "C" {
  1.1417 +
  1.1418 +/**
  1.1419 + * Recreate all epoll fds and restore status; include all events.
  1.1420 + */
static void
RecreateEpollFds() {
  EpollManager *man = EpollManager::Singleton();

  // For each epoll instance recorded in the Nuwa process: build a fresh
  // instance, splice it over the original fd number (so users of that fd see
  // the new instance), restore the fd flags, and re-register every watched
  // fd. Any failure is fatal.
  for (EpollManager::const_iterator info_it = man->begin();
       info_it != man->end();
       info_it++) {
    int epollfd = info_it->first;
    const EpollManager::EpollInfo *info = &info_it->second;

    // Save the descriptor and status flags so they survive the close/dup2
    // shuffle below.
    int fdflags = fcntl(epollfd, F_GETFD);
    if (fdflags == -1) {
      abort();
    }
    int fl = fcntl(epollfd, F_GETFL);
    if (fl == -1) {
      abort();
    }

    int newepollfd = REAL(epoll_create)(info->BackSize());
    if (newepollfd == -1) {
      abort();
    }
    int rv = REAL(close)(epollfd);
    if (rv == -1) {
      abort();
    }
    // Move the new instance onto the original fd number.
    rv = dup2(newepollfd, epollfd);
    if (rv == -1) {
      abort();
    }
    rv = REAL(close)(newepollfd);
    if (rv == -1) {
      abort();
    }

    rv = fcntl(epollfd, F_SETFD, fdflags);
    if (rv == -1) {
      abort();
    }
    rv = fcntl(epollfd, F_SETFL, fl);
    if (rv == -1) {
      abort();
    }

    // Replay every registration tracked by __wrap_epoll_ctl().
    for (EpollManager::EpollInfo::const_iterator events_it = info->begin();
         events_it != info->end();
         events_it++) {
      int fd = events_it->first;
      epoll_event events;
      events = events_it->second;
      rv = REAL(epoll_ctl)(epollfd, EPOLL_CTL_ADD, fd, &events);
      if (rv == -1) {
        abort();
      }
    }
  }

  // Shutdown EpollManager. It won't be needed in the spawned process.
  EpollManager::Shutdown();
}
  1.1482 +
  1.1483 +/**
  1.1484 + * Fix IPC to make it ready.
  1.1485 + *
  1.1486 + * Especially, fix ContentChild.
  1.1487 + */
  1.1488 +static void
  1.1489 +ReplaceIPC(NuwaProtoFdInfo *aInfoList, int aInfoSize) {
  1.1490 +  int i;
  1.1491 +  int rv;
  1.1492 +
  1.1493 +  for (i = 0; i < aInfoSize; i++) {
  1.1494 +    int fd = fcntl(aInfoList[i].originFd, F_GETFD);
  1.1495 +    if (fd == -1) {
  1.1496 +      abort();
  1.1497 +    }
  1.1498 +
  1.1499 +    int fl = fcntl(aInfoList[i].originFd, F_GETFL);
  1.1500 +    if (fl == -1) {
  1.1501 +      abort();
  1.1502 +    }
  1.1503 +
  1.1504 +    rv = dup2(aInfoList[i].newFds[NUWA_NEWFD_CHILD], aInfoList[i].originFd);
  1.1505 +    if (rv == -1) {
  1.1506 +      abort();
  1.1507 +    }
  1.1508 +
  1.1509 +    rv = fcntl(aInfoList[i].originFd, F_SETFD, fd);
  1.1510 +    if (rv == -1) {
  1.1511 +      abort();
  1.1512 +    }
  1.1513 +
  1.1514 +    rv = fcntl(aInfoList[i].originFd, F_SETFL, fl);
  1.1515 +    if (rv == -1) {
  1.1516 +      abort();
  1.1517 +    }
  1.1518 +  }
  1.1519 +}
  1.1520 +
  1.1521 +/**
  1.1522 + * Add a new content process at the chrome process.
  1.1523 + */
  1.1524 +static void
  1.1525 +AddNewProcess(pid_t pid, NuwaProtoFdInfo *aInfoList, int aInfoSize) {
  1.1526 +  static bool (*AddNewIPCProcess)(pid_t, NuwaProtoFdInfo *, int) = nullptr;
  1.1527 +
  1.1528 +  if (AddNewIPCProcess == nullptr) {
  1.1529 +    AddNewIPCProcess = (bool (*)(pid_t, NuwaProtoFdInfo *, int))
  1.1530 +      dlsym(RTLD_DEFAULT, "AddNewIPCProcess");
  1.1531 +  }
  1.1532 +  AddNewIPCProcess(pid, aInfoList, aInfoSize);
  1.1533 +}
  1.1534 +
  1.1535 +static void
  1.1536 +PrepareProtoSockets(NuwaProtoFdInfo *aInfoList, int aInfoSize) {
  1.1537 +  int i;
  1.1538 +  int rv;
  1.1539 +
  1.1540 +  for (i = 0; i < aInfoSize; i++) {
  1.1541 +    rv = REAL(socketpair)(PF_UNIX, SOCK_STREAM, 0, aInfoList[i].newFds);
  1.1542 +    if (rv == -1) {
  1.1543 +      abort();
  1.1544 +    }
  1.1545 +  }
  1.1546 +}
  1.1547 +
  1.1548 +static void
  1.1549 +CloseAllProtoSockets(NuwaProtoFdInfo *aInfoList, int aInfoSize) {
  1.1550 +  int i;
  1.1551 +
  1.1552 +  for (i = 0; i < aInfoSize; i++) {
  1.1553 +    REAL(close)(aInfoList[i].newFds[0]);
  1.1554 +    REAL(close)(aInfoList[i].newFds[1]);
  1.1555 +  }
  1.1556 +}
  1.1557 +
  1.1558 +static void
  1.1559 +AfterForkHook()
  1.1560 +{
  1.1561 +  void (*AfterNuwaFork)();
  1.1562 +
  1.1563 +  // This is defined in dom/ipc/ContentChild.cpp
  1.1564 +  AfterNuwaFork = (void (*)())
  1.1565 +    dlsym(RTLD_DEFAULT, "AfterNuwaFork");
  1.1566 +  AfterNuwaFork();
  1.1567 +}
  1.1568 +
  1.1569 +/**
  1.1570 + * Fork a new process that is ready for running IPC.
  1.1571 + *
  1.1572 + * @return the PID of the new process.
  1.1573 + */
static int
ForkIPCProcess() {
  int pid;

  // Serializes forking against NuwaSpawnPrepare()/NuwaSpawnWait() on the IPC
  // thread; released after signaling completion below.
  REAL(pthread_mutex_lock)(&sForkLock);

  PrepareProtoSockets(sProtoFdInfos, sProtoFdInfosSize);

  // sNuwaForking flags the wrappers that the fork() below is ours.
  sNuwaForking = true;
  pid = fork();
  sNuwaForking = false;
  if (pid == -1) {
    abort();
  }

  if (pid > 0) {
    // in the parent
    AddNewProcess(pid, sProtoFdInfos, sProtoFdInfosSize);
    CloseAllProtoSockets(sProtoFdInfos, sProtoFdInfosSize);
  } else {
    // in the child (pid == 0, which is also this function's return value
    // there)
    if (getenv("MOZ_DEBUG_CHILD_PROCESS")) {
      printf("\n\nNUWA CHILDCHILDCHILDCHILD\n  debug me @ %d\n\n", getpid());
      sleep(30);
    }
    // Rebuild everything fork() cannot carry over: IPC fds, signal fd pairs,
    // epoll instances, and finally the frozen threads.
    AfterForkHook();
    ReplaceSignalFds();
    ReplaceIPC(sProtoFdInfos, sProtoFdInfosSize);
    RecreateEpollFds();
    RecreateThreads();
    CloseAllProtoSockets(sProtoFdInfos, sProtoFdInfosSize);
  }

  // Wake NuwaSpawnWait() on the IPC thread.
  sForkWaitCondChanged = true;
  pthread_cond_signal(&sForkWaitCond);
  pthread_mutex_unlock(&sForkLock);

  return pid;
}
  1.1613 +
/**
 * Prepare for spawning a new process. Called on the IPC thread.
 *
 * Acquires sForkLock and deliberately does NOT release it here: the lock is
 * released by NuwaSpawnWait() on this same thread after the main thread has
 * signaled fork completion.
 */
MFBT_API void
NuwaSpawnPrepare() {
  REAL(pthread_mutex_lock)(&sForkLock);

  sForkWaitCondChanged = false; // Will be modified on the main thread.
}
  1.1623 +
/**
 * Let IPC thread wait until fork action on the main thread has completed.
 *
 * Must be called after NuwaSpawnPrepare() on the same (IPC) thread: this
 * blocks on the condvar (guarding against spurious wakeups via
 * sForkWaitCondChanged) and then releases the sForkLock acquired in
 * NuwaSpawnPrepare().
 */
MFBT_API void
NuwaSpawnWait() {
  while (!sForkWaitCondChanged) {
    REAL(pthread_cond_wait)(&sForkWaitCond, &sForkLock);
  }
  pthread_mutex_unlock(&sForkLock);
}
  1.1634 +
  1.1635 +/**
  1.1636 + * Spawn a new process. If not ready for spawn (still waiting for some threads
  1.1637 + * to freeze), postpone the spawn request until ready.
  1.1638 + *
  1.1639 + * @return the pid of the new process, or 0 if not ready.
  1.1640 + */
  1.1641 +MFBT_API pid_t
  1.1642 +NuwaSpawn() {
  1.1643 +  if (gettid() != getpid()) {
  1.1644 +    // Not the main thread.
  1.1645 +    abort();
  1.1646 +  }
  1.1647 +
  1.1648 +  pid_t pid = 0;
  1.1649 +
  1.1650 +  if (sNuwaReady) {
  1.1651 +    pid = ForkIPCProcess();
  1.1652 +  } else {
  1.1653 +    sNuwaPendingSpawn = true;
  1.1654 +  }
  1.1655 +
  1.1656 +  return pid;
  1.1657 +}
  1.1658 +
/**
 * Prepare to freeze the Nuwa-supporting threads.
 *
 * Marks this process as the Nuwa process and takes sThreadFreezeLock so
 * that marked threads block at their freeze point until MakeNuwaProcess().
 */
MFBT_API void
PrepareNuwaProcess() {
  sIsNuwaProcess = true;
  // Explicitly ignore SIGCHLD so we don't have to call waitpid() to reap
  // dead child processes.
  signal(SIGCHLD, SIG_IGN);

  // Make marked threads block in one freeze point.  The lock is never
  // released in the Nuwa process; frozen threads stay parked on it.
  REAL(pthread_mutex_lock)(&sThreadFreezeLock);

  // Populate sMainThread for mapping of tgkill.
  sMainThread.origThreadID = pthread_self();
  sMainThread.origNativeThreadID = gettid();
}
  1.1676 +
  1.1677 +// Make current process as a Nuwa process.
  1.1678 +MFBT_API void
  1.1679 +MakeNuwaProcess() {
  1.1680 +  void (*GetProtoFdInfos)(NuwaProtoFdInfo *, int, int *) = nullptr;
  1.1681 +  void (*OnNuwaProcessReady)() = nullptr;
  1.1682 +  sIsFreezing = true;
  1.1683 +
  1.1684 +  REAL(pthread_mutex_lock)(&sThreadCountLock);
  1.1685 +
  1.1686 +  // wait until all threads are frozen.
  1.1687 +  while ((sThreadFreezeCount + sThreadSkipCount) != sThreadCount) {
  1.1688 +    REAL(pthread_cond_wait)(&sThreadChangeCond, &sThreadCountLock);
  1.1689 +  }
  1.1690 +
  1.1691 +  GetProtoFdInfos = (void (*)(NuwaProtoFdInfo *, int, int *))
  1.1692 +    dlsym(RTLD_DEFAULT, "GetProtoFdInfos");
  1.1693 +  GetProtoFdInfos(sProtoFdInfos, NUWA_TOPLEVEL_MAX, &sProtoFdInfosSize);
  1.1694 +
  1.1695 +  sNuwaReady = true;
  1.1696 +
  1.1697 +  pthread_mutex_unlock(&sThreadCountLock);
  1.1698 +
  1.1699 +  OnNuwaProcessReady = (void (*)())dlsym(RTLD_DEFAULT, "OnNuwaProcessReady");
  1.1700 +  OnNuwaProcessReady();
  1.1701 +
  1.1702 +  if (sNuwaPendingSpawn) {
  1.1703 +    sNuwaPendingSpawn = false;
  1.1704 +    NuwaSpawn();
  1.1705 +  }
  1.1706 +}
  1.1707 +
  1.1708 +/**
  1.1709 + * Mark the current thread as supporting Nuwa. The thread will be recreated in
  1.1710 + * the spawned process.
  1.1711 + */
  1.1712 +MFBT_API void
  1.1713 +NuwaMarkCurrentThread(void (*recreate)(void *), void *arg) {
  1.1714 +  if (!sIsNuwaProcess) {
  1.1715 +    return;
  1.1716 +  }
  1.1717 +
  1.1718 +  thread_info_t *tinfo = CUR_THREAD_INFO;
  1.1719 +  if (tinfo == nullptr) {
  1.1720 +    abort();
  1.1721 +  }
  1.1722 +
  1.1723 +  tinfo->flags |= TINFO_FLAG_NUWA_SUPPORT;
  1.1724 +  tinfo->recrFunc = recreate;
  1.1725 +  tinfo->recrArg = arg;
  1.1726 +
  1.1727 +  // XXX Thread name might be set later than this call. If this is the case, we
  1.1728 +  // might need to delay getting the thread name.
  1.1729 +  prctl(PR_GET_NAME, (unsigned long)&tinfo->nativeThreadName, 0, 0, 0);
  1.1730 +}
  1.1731 +
  1.1732 +/**
  1.1733 + * Mark the current thread as not supporting Nuwa. Don't recreate this thread in
  1.1734 + * the spawned process.
  1.1735 + */
  1.1736 +MFBT_API void
  1.1737 +NuwaSkipCurrentThread() {
  1.1738 +  if (!sIsNuwaProcess) return;
  1.1739 +
  1.1740 +  thread_info_t *tinfo = CUR_THREAD_INFO;
  1.1741 +  if (tinfo == nullptr) {
  1.1742 +    abort();
  1.1743 +  }
  1.1744 +
  1.1745 +  if (!(tinfo->flags & TINFO_FLAG_NUWA_SKIP)) {
  1.1746 +    sThreadSkipCount++;
  1.1747 +  }
  1.1748 +  tinfo->flags |= TINFO_FLAG_NUWA_SKIP;
  1.1749 +}
  1.1750 +
  1.1751 +/**
  1.1752 + * Force to freeze the current thread.
  1.1753 + *
  1.1754 + * This method does not return in Nuwa process.  It returns for the
  1.1755 + * recreated thread.
  1.1756 + */
  1.1757 +MFBT_API void
  1.1758 +NuwaFreezeCurrentThread() {
  1.1759 +  thread_info_t *tinfo = CUR_THREAD_INFO;
  1.1760 +  if (sIsNuwaProcess &&
  1.1761 +      (tinfo = CUR_THREAD_INFO) &&
  1.1762 +      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT)) {
  1.1763 +    if (!setjmp(tinfo->jmpEnv)) {
  1.1764 +      REAL(pthread_mutex_lock)(&sThreadCountLock);
  1.1765 +      SaveTLSInfo(tinfo);
  1.1766 +      sThreadFreezeCount++;
  1.1767 +      pthread_cond_signal(&sThreadChangeCond);
  1.1768 +      pthread_mutex_unlock(&sThreadCountLock);
  1.1769 +
  1.1770 +      REAL(pthread_mutex_lock)(&sThreadFreezeLock);
  1.1771 +    } else {
  1.1772 +      RECREATE_CONTINUE();
  1.1773 +      RECREATE_GATE();
  1.1774 +    }
  1.1775 +  }
  1.1776 +}
  1.1777 +
  1.1778 +/**
  1.1779 + * The caller of NuwaCheckpointCurrentThread() is at the line it wishes to
  1.1780 + * return after the thread is recreated.
  1.1781 + *
  1.1782 + * The checkpointed thread will restart at the calling line of
  1.1783 + * NuwaCheckpointCurrentThread(). This macro returns true in the Nuwa process
  1.1784 + * and false on the recreated thread in the forked process.
  1.1785 + *
  1.1786 + * NuwaCheckpointCurrentThread() is implemented as a macro so we can place the
  1.1787 + * setjmp() call in the calling method without changing its stack pointer. This
  1.1788 + * is essential for not corrupting the stack when the calling thread continues
  1.1789 + * to request the main thread for forking a new process. The caller of
  1.1790 + * NuwaCheckpointCurrentThread() should not return before the process forking
  1.1791 + * finishes.
  1.1792 + *
  1.1793 + * @return true for Nuwa process, and false in the forked process.
  1.1794 + */
  1.1795 +MFBT_API jmp_buf*
  1.1796 +NuwaCheckpointCurrentThread1() {
  1.1797 +  thread_info_t *tinfo = CUR_THREAD_INFO;
  1.1798 +  if (sIsNuwaProcess &&
  1.1799 +      (tinfo = CUR_THREAD_INFO) &&
  1.1800 +      (tinfo->flags & TINFO_FLAG_NUWA_SUPPORT)) {
  1.1801 +    return &tinfo->jmpEnv;
  1.1802 +  }
  1.1803 +  abort();
  1.1804 +  return nullptr;
  1.1805 +}
  1.1806 +
/**
 * Second half of the NuwaCheckpointCurrentThread() macro: runs after the
 * caller's setjmp().
 *
 * @param setjmpCond  The value returned by setjmp(): 0 on the direct call
 *                    (Nuwa process), non-zero after longjmp() (recreated
 *                    thread in the forked process).
 * @return true in the Nuwa process, false on the recreated thread.
 */
MFBT_API bool
NuwaCheckpointCurrentThread2(int setjmpCond) {
  thread_info_t *tinfo = CUR_THREAD_INFO;
  if (setjmpCond == 0) {
    // Checkpoint path (Nuwa process): record TLS and count this thread as
    // frozen exactly once, even if checkpointed repeatedly.
    REAL(pthread_mutex_lock)(&sThreadCountLock);
    if (!(tinfo->flags & TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT)) {
      tinfo->flags |= TINFO_FLAG_NUWA_EXPLICIT_CHECKPOINT;
      SaveTLSInfo(tinfo);
      sThreadFreezeCount++;
    }
    pthread_cond_signal(&sThreadChangeCond);
    pthread_mutex_unlock(&sThreadCountLock);
    return true;
  }
  RECREATE_CONTINUE();
  RECREATE_GATE();
  return false;               // Recreated thread.
}
  1.1825 +
  1.1826 +/**
  1.1827 + * Register methods to be invoked before recreating threads in the spawned
  1.1828 + * process.
  1.1829 + */
  1.1830 +MFBT_API void
  1.1831 +NuwaAddConstructor(void (*construct)(void *), void *arg) {
  1.1832 +  nuwa_construct_t ctr;
  1.1833 +  ctr.construct = construct;
  1.1834 +  ctr.arg = arg;
  1.1835 +  sConstructors.push_back(ctr);
  1.1836 +}
  1.1837 +
  1.1838 +/**
  1.1839 + * Register methods to be invoked after recreating threads in the spawned
  1.1840 + * process.
  1.1841 + */
  1.1842 +MFBT_API void
  1.1843 +NuwaAddFinalConstructor(void (*construct)(void *), void *arg) {
  1.1844 +  nuwa_construct_t ctr;
  1.1845 +  ctr.construct = construct;
  1.1846 +  ctr.arg = arg;
  1.1847 +  sFinalConstructors.push_back(ctr);
  1.1848 +}
  1.1849 +
/**
 * @return true if the current process is the Nuwa process (set by
 *         PrepareNuwaProcess()).
 */
MFBT_API bool
IsNuwaProcess() {
  return sIsNuwaProcess;
}
  1.1857 +
/**
 * @return true if the Nuwa process is ready for spawning new processes
 *         (set by MakeNuwaProcess() once all threads are frozen).
 */
MFBT_API bool
IsNuwaReady() {
  return sNuwaReady;
}
  1.1865 +
  1.1866 +}      // extern "C"

mercurial