Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 #include "primpl.h"
7 #include <sys/types.h>
8 #include <unistd.h>
9 #include <signal.h>
10 #include <pthread.h>
/* Signal mask covering the signals treated as "virtual interrupts"
 * by the user-level scheduler (populated in _MD_EarlyInit). */
sigset_t ints_off;
/* Protects the NSPR heap allocator (initialized in _MD_InitLocks). */
pthread_mutex_t _pr_heapLock;

/* Per-pthread thread-specific-data keys, created in _MD_EarlyInit. */
pthread_key_t current_thread_key;
pthread_key_t current_cpu_key;
pthread_key_t last_thread_key;
pthread_key_t intsoff_key;

/* Native pthread bookkeeping, updated in _MD_CreateThread. */
PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
PRInt32 _pr_md_pthreads = 1;
24 void _MD_EarlyInit(void)
25 {
26 extern PRInt32 _nspr_noclock;
28 if (pthread_key_create(¤t_thread_key, NULL) != 0) {
29 perror("pthread_key_create failed");
30 exit(1);
31 }
32 if (pthread_key_create(¤t_cpu_key, NULL) != 0) {
33 perror("pthread_key_create failed");
34 exit(1);
35 }
36 if (pthread_key_create(&last_thread_key, NULL) != 0) {
37 perror("pthread_key_create failed");
38 exit(1);
39 }
40 if (pthread_key_create(&intsoff_key, NULL) != 0) {
41 perror("pthread_key_create failed");
42 exit(1);
43 }
45 sigemptyset(&ints_off);
46 sigaddset(&ints_off, SIGALRM);
47 sigaddset(&ints_off, SIGIO);
48 sigaddset(&ints_off, SIGCLD);
50 /*
51 * disable clock interrupts
52 */
53 _nspr_noclock = 1;
55 }
57 void _MD_InitLocks()
58 {
59 if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
60 perror("pthread_mutex_init failed");
61 exit(1);
62 }
63 }
65 PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
66 {
67 PRIntn _is;
68 PRThread *me = _PR_MD_CURRENT_THREAD();
70 if (me && !_PR_IS_NATIVE_THREAD(me))
71 _PR_INTSOFF(_is);
72 pthread_mutex_destroy(&lockp->mutex);
73 if (me && !_PR_IS_NATIVE_THREAD(me))
74 _PR_FAST_INTSON(_is);
75 }
79 PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
80 {
81 PRStatus rv;
82 PRIntn is;
83 PRThread *me = _PR_MD_CURRENT_THREAD();
85 if (me && !_PR_IS_NATIVE_THREAD(me))
86 _PR_INTSOFF(is);
87 rv = pthread_mutex_init(&lockp->mutex, NULL);
88 if (me && !_PR_IS_NATIVE_THREAD(me))
89 _PR_FAST_INTSON(is);
90 return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
91 }
/*
 * _MD_HomeGCRegisters
 *
 * Expose a thread's saved machine context to the garbage collector as a
 * word array.  If the thread is the caller itself, setjmp() flushes the
 * live registers into the context first.
 *
 * t         - thread whose registers are wanted
 * isCurrent - nonzero if t is the calling thread
 * np        - out: number of PRWords in the returned buffer
 */
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) setjmp(CONTEXT(t));
    }
    *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
    return (PRWord *) CONTEXT(t);
}
103 PR_IMPLEMENT(void)
104 _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
105 {
106 /*
107 * XXX - to be implemented
108 */
109 return;
110 }
112 PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread *thread)
113 {
114 struct sigaction sigact;
116 if (thread->flags & _PR_GLOBAL_SCOPE) {
117 thread->md.pthread = pthread_self();
118 #if 0
119 /*
120 * set up SIGUSR1 handler; this is used to save state
121 * during PR_SuspendAll
122 */
123 sigact.sa_handler = save_context_and_block;
124 sigact.sa_flags = SA_RESTART;
125 /*
126 * Must mask clock interrupts
127 */
128 sigact.sa_mask = timer_set;
129 sigaction(SIGUSR1, &sigact, 0);
130 #endif
131 }
133 return PR_SUCCESS;
134 }
136 PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread *thread)
137 {
138 if (thread->flags & _PR_GLOBAL_SCOPE) {
139 _MD_CLEAN_THREAD(thread);
140 _MD_SET_CURRENT_THREAD(NULL);
141 }
142 }
144 PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread *thread)
145 {
146 if (thread->flags & _PR_GLOBAL_SCOPE) {
147 pthread_mutex_destroy(&thread->md.pthread_mutex);
148 pthread_cond_destroy(&thread->md.pthread_cond);
149 }
150 }
152 PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread *thread)
153 {
154 PRInt32 rv;
156 PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
157 _PR_IS_GCABLE_THREAD(thread));
158 #if 0
159 thread->md.suspending_id = getpid();
160 rv = kill(thread->md.id, SIGUSR1);
161 PR_ASSERT(rv == 0);
162 /*
163 * now, block the current thread/cpu until woken up by the suspended
164 * thread from it's SIGUSR1 signal handler
165 */
166 blockproc(getpid());
167 #endif
168 }
170 PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread *thread)
171 {
172 PRInt32 rv;
174 PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
175 _PR_IS_GCABLE_THREAD(thread));
176 #if 0
177 rv = unblockproc(thread->md.id);
178 #endif
179 }
181 PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU *thread)
182 {
183 PRInt32 rv;
185 #if 0
186 cpu->md.suspending_id = getpid();
187 rv = kill(cpu->md.id, SIGUSR1);
188 PR_ASSERT(rv == 0);
189 /*
190 * now, block the current thread/cpu until woken up by the suspended
191 * thread from it's SIGUSR1 signal handler
192 */
193 blockproc(getpid());
194 #endif
195 }
/*
 * Resume a CPU previously stopped by _MD_SuspendCPU.  The unblockproc()
 * implementation is compiled out (#if 0), so this is currently a no-op.
 * NOTE(review): the disabled code uses `cpu` but the parameter is named
 * `thread` — it would not compile if re-enabled as-is.
 */
PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU *thread)
{
#if 0
    unblockproc(cpu->md.id);
#endif
}
#define PT_NANOPERMICRO 1000UL      /* nanoseconds per microsecond */
#define PT_BILLION 1000000000UL     /* nanoseconds per second */
/*
 * _pt_wait
 *
 * Block a global-scope thread on its per-thread condition variable
 * until _MD_WakeupWaiter() signals it or the (relative, in PR ticks)
 * timeout expires.
 *
 * thread->md.wait is a signed counter protected by md.pthread_mutex:
 * a wakeup that arrives before the wait increments it, so this routine
 * only sleeps when the decremented counter goes negative.  If the wait
 * itself fails (e.g. times out), the decrement is undone.
 *
 * Returns PR_SUCCESS if woken normally, PR_FAILURE on timeout/error.
 */
PR_IMPLEMENT(PRStatus)
_pt_wait(PRThread *thread, PRIntervalTime timeout)
{
    int rv;
    struct timeval now;
    struct timespec tmo;
    PRUint32 ticks = PR_TicksPerSecond();

    if (timeout != PR_INTERVAL_NO_TIMEOUT) {
        tmo.tv_sec = timeout / ticks;
        /* Leftover sub-second ticks, converted to nanoseconds by going
         * through microseconds (PR_IntervalToMicroseconds) scaled by
         * PT_NANOPERMICRO. */
        tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
        tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO *
            tmo.tv_nsec);

        /* pthreads wants this in absolute time, off we go ... */
        (void)GETTIMEOFDAY(&now);
        /* that one's usecs, this one's nsecs - grrrr! */
        tmo.tv_sec += now.tv_sec;
        tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
        /* Normalize: carry whole seconds out of tv_nsec. */
        tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
        tmo.tv_nsec %= PT_BILLION;
    }

    pthread_mutex_lock(&thread->md.pthread_mutex);
    thread->md.wait--;
    if (thread->md.wait < 0) {
        if (timeout != PR_INTERVAL_NO_TIMEOUT) {
            rv = pthread_cond_timedwait(&thread->md.pthread_cond,
                &thread->md.pthread_mutex, &tmo);
        }
        else
            rv = pthread_cond_wait(&thread->md.pthread_cond,
                &thread->md.pthread_mutex);
        /* Wait failed (e.g. ETIMEDOUT): undo the decrement so a later
         * wakeup is not silently consumed. */
        if (rv != 0) {
            thread->md.wait++;
        }
    } else
        rv = 0;      /* wakeup already pending; no need to sleep */
    pthread_mutex_unlock(&thread->md.pthread_mutex);

    return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
/*
 * _MD_wait
 *
 * Machine-dependent thread wait.  Global-scope threads park on their
 * pthread condvar via _pt_wait(); local threads context-switch back to
 * the user-level scheduler.
 *
 * On a _pt_wait timeout: if the thread is still registered on a cvar
 * waitQ (wait.cvar set), clear it and mark the thread running; else a
 * wakeup was already granted, so re-wait without a timeout to consume
 * the matching _pt_wait counter increment.
 */
PR_IMPLEMENT(PRStatus)
_MD_wait(PRThread *thread, PRIntervalTime ticks)
{
    if ( thread->flags & _PR_GLOBAL_SCOPE ) {
        _MD_CHECK_FOR_EXIT();
        if (_pt_wait(thread, ticks) == PR_FAILURE) {
            _MD_CHECK_FOR_EXIT();
            /*
             * wait timed out
             */
            _PR_THREAD_LOCK(thread);
            if (thread->wait.cvar) {
                /*
                 * The thread will remove itself from the waitQ
                 * of the cvar in _PR_WaitCondVar
                 */
                thread->wait.cvar = NULL;
                thread->state = _PR_RUNNING;
                _PR_THREAD_UNLOCK(thread);
            } else {
                /* Wakeup raced the timeout: absorb the pending signal. */
                _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
                _PR_THREAD_UNLOCK(thread);
            }
        }
    } else {
        _PR_MD_SWITCH_CONTEXT(thread);
    }
    return PR_SUCCESS;
}
282 PR_IMPLEMENT(PRStatus)
283 _MD_WakeupWaiter(PRThread *thread)
284 {
285 PRThread *me = _PR_MD_CURRENT_THREAD();
286 PRInt32 pid, rv;
287 PRIntn is;
289 PR_ASSERT(_pr_md_idle_cpus >= 0);
290 if (thread == NULL) {
291 if (_pr_md_idle_cpus)
292 _MD_Wakeup_CPUs();
293 } else if (!_PR_IS_NATIVE_THREAD(thread)) {
294 /*
295 * If the thread is on my cpu's runq there is no need to
296 * wakeup any cpus
297 */
298 if (!_PR_IS_NATIVE_THREAD(me)) {
299 if (me->cpu != thread->cpu) {
300 if (_pr_md_idle_cpus)
301 _MD_Wakeup_CPUs();
302 }
303 } else {
304 if (_pr_md_idle_cpus)
305 _MD_Wakeup_CPUs();
306 }
307 } else {
308 PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
309 if (!_PR_IS_NATIVE_THREAD(me))
310 _PR_INTSOFF(is);
312 pthread_mutex_lock(&thread->md.pthread_mutex);
313 thread->md.wait++;
314 rv = pthread_cond_signal(&thread->md.pthread_cond);
315 PR_ASSERT(rv == 0);
316 pthread_mutex_unlock(&thread->md.pthread_mutex);
318 if (!_PR_IS_NATIVE_THREAD(me))
319 _PR_FAST_INTSON(is);
320 }
321 return PR_SUCCESS;
322 }
/* These functions should not be called for AIX */
PR_IMPLEMENT(void)
_MD_YIELD(void)
{
    /* Any call here is a logic error on this platform; assert-fail. */
    PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
}
331 PR_IMPLEMENT(PRStatus)
332 _MD_CreateThread(
333 PRThread *thread,
334 void (*start) (void *),
335 PRThreadPriority priority,
336 PRThreadScope scope,
337 PRThreadState state,
338 PRUint32 stackSize)
339 {
340 PRIntn is;
341 int rv;
342 PRThread *me = _PR_MD_CURRENT_THREAD();
343 pthread_attr_t attr;
345 if (!_PR_IS_NATIVE_THREAD(me))
346 _PR_INTSOFF(is);
348 if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
349 if (!_PR_IS_NATIVE_THREAD(me))
350 _PR_FAST_INTSON(is);
351 return PR_FAILURE;
352 }
354 if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
355 pthread_mutex_destroy(&thread->md.pthread_mutex);
356 if (!_PR_IS_NATIVE_THREAD(me))
357 _PR_FAST_INTSON(is);
358 return PR_FAILURE;
359 }
360 thread->flags |= _PR_GLOBAL_SCOPE;
362 pthread_attr_init(&attr); /* initialize attr with default attributes */
363 if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
364 pthread_mutex_destroy(&thread->md.pthread_mutex);
365 pthread_cond_destroy(&thread->md.pthread_cond);
366 pthread_attr_destroy(&attr);
367 if (!_PR_IS_NATIVE_THREAD(me))
368 _PR_FAST_INTSON(is);
369 return PR_FAILURE;
370 }
372 thread->md.wait = 0;
373 rv = pthread_create(&thread->md.pthread, &attr, start, (void *)thread);
374 if (0 == rv) {
375 _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
376 _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
377 if (!_PR_IS_NATIVE_THREAD(me))
378 _PR_FAST_INTSON(is);
379 return PR_SUCCESS;
380 } else {
381 pthread_mutex_destroy(&thread->md.pthread_mutex);
382 pthread_cond_destroy(&thread->md.pthread_cond);
383 pthread_attr_destroy(&attr);
384 _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
385 if (!_PR_IS_NATIVE_THREAD(me))
386 _PR_FAST_INTSON(is);
387 PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
388 return PR_FAILURE;
389 }
390 }
/*
 * _MD_InitRunningCPU
 *
 * Machine-dependent setup for a CPU structure representing the thread
 * already running on it: perform the common unix initialization, record
 * the pthread identity, and register the read end of the wakeup pipe in
 * the CPU's I/O wait set (fd_set form only when poll() is not in use).
 */
PR_IMPLEMENT(void)
_MD_InitRunningCPU(struct _PRCPU *cpu)
{
    extern int _pr_md_pipefd[2];

    _MD_unix_init_running_cpu(cpu);
    cpu->md.pthread = pthread_self();
    if (_pr_md_pipefd[0] >= 0) {
        _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
#ifndef _PR_USE_POLL
        FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
#endif
    }
}
/*
 * _MD_CleanupBeforeExit
 *
 * Pre-exit cleanup.  The entire Irix-derived implementation (waking all
 * CPUs so they notice the exit flag, then draining the native-thread
 * recycle queue) is compiled out with #if 0, so this is currently a
 * no-op on this platform.
 */
void
_MD_CleanupBeforeExit(void)
{
#if 0
    extern PRInt32 _pr_cpus_exit;

    _pr_irix_exit_now = 1;
    if (_pr_numCPU > 1) {
        /*
         * Set a global flag, and wakeup all cpus which will notice the flag
         * and exit.
         */
        _pr_cpus_exit = getpid();
        _MD_Wakeup_CPUs();
        while(_pr_numCPU > 1) {
            _PR_WAIT_SEM(_pr_irix_exit_sem);
            _pr_numCPU--;
        }
    }
    /*
     * cause global threads on the recycle list to exit
     */
    _PR_DEADQ_LOCK;
    if (_PR_NUM_DEADNATIVE != 0) {
        PRThread *thread;
        PRCList *ptr;

        ptr = _PR_DEADNATIVEQ.next;
        while( ptr != &_PR_DEADNATIVEQ ) {
            thread = _PR_THREAD_PTR(ptr);
            _MD_CVAR_POST_SEM(thread);
            ptr = ptr->next;
        }
    }
    _PR_DEADQ_UNLOCK;
    while(_PR_NUM_DEADNATIVE > 1) {
        _PR_WAIT_SEM(_pr_irix_exit_sem);
        _PR_DEC_DEADNATIVE;
    }
#endif
}