|
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
|
2 /* This Source Code Form is subject to the terms of the Mozilla Public |
|
3 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
5 |
|
6 #ifndef nspr_irix_defs_h___ |
|
7 #define nspr_irix_defs_h___ |
|
8 |
|
9 #define _PR_HAVE_ATOMIC_CAS |
|
10 |
|
11 /* |
|
12 * MipsPro assembler defines _LANGUAGE_ASSEMBLY |
|
13 */ |
|
14 #ifndef _LANGUAGE_ASSEMBLY |
|
15 |
|
16 #include "prclist.h" |
|
17 #include "prthread.h" |
|
18 #include <sys/ucontext.h> |
|
19 |
|
20 /* |
|
21 * Internal configuration macros |
|
22 */ |
|
23 |
|
#define PR_LINKER_ARCH      "irix"      /* linker architecture name */
#define _PR_SI_SYSNAME      "IRIX"      /* system name reported to callers */
#define _PR_SI_ARCHITECTURE "mips"      /* architecture reported to callers */
#define PR_DLL_SUFFIX       ".so"       /* shared-library filename suffix */

/* NOTE(review): fixed virtual-address bases for mapped segments and
 * thread stacks — these look specific to the IRIX address-space layout;
 * confirm before reusing elsewhere. */
#define _PR_VMBASE              0x30000000
#define _PR_STACK_VMBASE    0x50000000
#define _PR_NUM_GCREGS          9           /* saved registers the GC scans */
#define _MD_MMAP_FLAGS          MAP_PRIVATE

#define _MD_DEFAULT_STACK_SIZE  65536L      /* default thread stack: 64 KB */
#define _MD_MIN_STACK_SIZE      16384L      /* minimum thread stack: 16 KB */
|
36 |
|
/* Platform capability switches consumed by the shared NSPR sources. */
#undef  HAVE_STACK_GROWING_UP           /* stack does not grow upward here */
#define HAVE_WEAK_IO_SYMBOLS
#define HAVE_WEAK_MALLOC_SYMBOLS
#define HAVE_DLL
#define USE_DLFCN                       /* dlopen()/dlsym()-based loader */
#define _PR_HAVE_ATOMIC_OPS
#define _PR_POLL_AVAILABLE
#define _PR_USE_POLL                    /* prefer poll() over select() */
#define _PR_STAT_HAS_ST_ATIM
#define _PR_HAVE_OFF64_T
#define HAVE_POINTER_LOCALTIME_R
#define _PR_HAVE_POSIX_SEMAPHORES
#define PR_HAVE_POSIX_NAMED_SHARED_MEMORY
#define _PR_ACCEPT_INHERIT_NONBLOCK
|
51 |
|
/* IPv6 resolver entry points, available only when built with _PR_INET6. */
#ifdef _PR_INET6
#define _PR_HAVE_INET_NTOP
#define _PR_HAVE_GETIPNODEBYNAME
#define _PR_HAVE_GETIPNODEBYADDR
#define _PR_HAVE_GETADDRINFO
#endif
|
58 |
|
/* Initialization entry points */

/* Machine-dependent early initialization; installed as _MD_EARLY_INIT. */
NSPR_API(void) _MD_EarlyInit(void);
#define _MD_EARLY_INIT _MD_EarlyInit

/* IRIX-specific initialization; installed as _MD_FINAL_INIT. */
NSPR_API(void) _MD_IrixInit(void);
#define _MD_FINAL_INIT _MD_IrixInit

/* No machine-dependent I/O initialization is needed on this platform. */
#define _MD_INIT_IO()

/* Timer operations */

/* Current time in platform ticks; installed as _MD_GET_INTERVAL. */
NSPR_API(PRIntervalTime) _MD_IrixGetInterval(void);
#define _MD_GET_INTERVAL _MD_IrixGetInterval

/* Ticks per second of the interval timer; installed as _MD_INTERVAL_PER_SEC. */
NSPR_API(PRIntervalTime) _MD_IrixIntervalPerSec(void);
#define _MD_INTERVAL_PER_SEC _MD_IrixIntervalPerSec

/* GC operations */

/* Return the saved stack pointer of a thread (for GC stack scanning). */
NSPR_API(void *) _MD_GetSP(PRThread *thread);
#define _MD_GET_SP _MD_GetSP
|
78 |
|
79 /* The atomic operations */ |
|
80 #include <mutex.h> |
|
81 #define _MD_INIT_ATOMIC() |
|
82 #define _MD_ATOMIC_INCREMENT(val) add_then_test((unsigned long*)val, 1) |
|
83 #define _MD_ATOMIC_ADD(ptr, val) add_then_test((unsigned long*)ptr, (unsigned long)val) |
|
84 #define _MD_ATOMIC_DECREMENT(val) add_then_test((unsigned long*)val, 0xffffffff) |
|
85 #define _MD_ATOMIC_SET(val, newval) test_and_set((unsigned long*)val, newval) |
|
86 |
|
87 #if defined(_PR_PTHREADS) |
|
88 #else /* defined(_PR_PTHREADS) */ |
|
89 |
|
90 /************************************************************************/ |
|
91 |
|
92 #include <setjmp.h> |
|
93 #include <errno.h> |
|
94 #include <unistd.h> |
|
95 #include <bstring.h> |
|
96 #include <sys/time.h> |
|
97 #include <ulocks.h> |
|
98 #include <sys/prctl.h> |
|
99 |
|
100 |
|
/*
 * Data region private to each sproc. This region is setup by calling
 * mmap(...,MAP_LOCAL,...). The private data is mapped at the same
 * address in every sproc, but every sproc gets a private mapping.
 *
 * Just make sure that this structure fits in a page, as only one page
 * is allocated for the private region.
 */
struct sproc_private_data {
    struct PRThread *me;    /* thread currently running on this sproc */
    struct _PRCPU *cpu;     /* virtual CPU bound to this sproc */
    struct PRThread *last;  /* previously-running thread (see _MD_LAST_THREAD) */
    PRUintn intsOff;        /* nonzero while "interrupts" are disabled */
    int sproc_pid;          /* OS pid of this sproc */
};
|
116 |
|
/* Base address of the per-sproc private page (struct sproc_private_data). */
extern char *_nspr_sproc_private;

/* Accessors for the per-sproc private data region. */
#define _PR_PRDA()                   ((struct sproc_private_data *) _nspr_sproc_private)
#define _MD_SET_CURRENT_THREAD(_thread) _PR_PRDA()->me = (_thread)
#define _MD_THIS_THREAD()            (_PR_PRDA()->me)
#define _MD_LAST_THREAD()            (_PR_PRDA()->last)
#define _MD_SET_LAST_THREAD(_thread) _PR_PRDA()->last = (_thread)
#define _MD_CURRENT_CPU()            (_PR_PRDA()->cpu)
#define _MD_SET_CURRENT_CPU(_cpu)    _PR_PRDA()->cpu = (_cpu)
#define _MD_SET_INTSOFF(_val)        (_PR_PRDA()->intsOff = _val)
#define _MD_GET_INTSOFF()            (_PR_PRDA()->intsOff)

#define _MD_SET_SPROC_PID(_val)      (_PR_PRDA()->sproc_pid = _val)
#define _MD_GET_SPROC_PID()          (_PR_PRDA()->sproc_pid)
|
131 |
|
NSPR_API(struct PRThread*) _MD_get_attached_thread(void);
NSPR_API(struct PRThread*) _MD_get_current_thread(void);
/* _MD_GET_ATTACHED_THREAD may return NULL for a thread not yet attached
 * to NSPR; the lock macros below check for that. */
#define _MD_GET_ATTACHED_THREAD() _MD_get_attached_thread()
#define _MD_CURRENT_THREAD() _MD_get_current_thread()

/*
 * If process exit has been requested (_pr_irix_exit_now), post the exit
 * semaphore, wake the other CPUs, and terminate this sproc immediately.
 */
#define _MD_CHECK_FOR_EXIT() { \
    if (_pr_irix_exit_now) { \
        _PR_POST_SEM(_pr_irix_exit_sem); \
        _MD_Wakeup_CPUs(); \
        _exit(0); \
    } \
}

/* Attaching a thread needs no machine-dependent work on this platform. */
#define _MD_ATTACH_THREAD(threadp)

/* Save/restore the per-thread errno across a context switch.
 * Note: these expansions already end in ';', so call sites omit it
 * (see _MD_SWITCH_CONTEXT / _MD_RESTORE_CONTEXT). */
#define _MD_SAVE_ERRNO(_thread)    (_thread)->md.errcode = errno;
#define _MD_RESTORE_ERRNO(_thread) errno = (_thread)->md.errcode;
|
149 |
|
/* Globals shared by the IRIX sproc machinery (defined in the .c files). */
extern struct _PRCPU *_pr_primordialCPU;
extern usema_t *_pr_irix_exit_sem;       /* posted on exit (see _MD_CHECK_FOR_EXIT) */
extern PRInt32 _pr_irix_exit_now;        /* nonzero => all sprocs must exit */
extern int _pr_irix_primoridal_cpu_fd[]; /* sic: misspelling matches the definition */
extern PRInt32 _pr_irix_process_exit;
extern PRInt32 _pr_irix_process_exit_code;
|
156 |
|
/* Thread operations */

/*
 * _PR_LOCK_HEAP and _PR_UNLOCK_HEAP are an intentionally unbalanced
 * pair: _PR_LOCK_HEAP opens a brace block and declares _is inside it,
 * and _PR_UNLOCK_HEAP closes that block, so the interrupt state saved
 * by _PR_INTSOFF(_is) is still in scope for _PR_INTSON(_is).  The two
 * macros must always be used together, in the same scope, lock first.
 * For local (user-scheduled) threads, "interrupts" are disabled while
 * the heap lock is held so the thread is not preempted inside malloc.
 */
#define _PR_LOCK_HEAP() { \
    PRIntn _is; \
    if (_pr_primordialCPU) { \
        if (_MD_GET_ATTACHED_THREAD() && \
            !_PR_IS_NATIVE_THREAD( \
                _MD_GET_ATTACHED_THREAD())) \
            _PR_INTSOFF(_is); \
        _PR_LOCK(_pr_heapLock); \
    }

#define _PR_UNLOCK_HEAP() if (_pr_primordialCPU) { \
        _PR_UNLOCK(_pr_heapLock); \
        if (_MD_GET_ATTACHED_THREAD() && \
            !_PR_IS_NATIVE_THREAD( \
                _MD_GET_ATTACHED_THREAD())) \
            _PR_INTSON(_is); \
    } \
}
|
176 |
|
/* Semaphore primitives, built on IRIX arena (usema_t) semaphores. */
#define _PR_OPEN_POLL_SEM(_sem) usopenpollsema(_sem, 0666)
#define _PR_WAIT_SEM(_sem)      uspsema(_sem)
#define _PR_POST_SEM(_sem)      usvsema(_sem)

/* Post the pollable semaphore a thread waits on for its cvar. */
#define _MD_CVAR_POST_SEM(threadp) usvsema((threadp)->md.cvar_pollsem)

/* The I/O queue needs no separate machine-dependent lock here. */
#define _MD_IOQ_LOCK()
#define _MD_IOQ_UNLOCK()
|
185 |
|
/* Machine-dependent lock: an IRIX arena lock plus its owning arena. */
struct _MDLock {
    ulock_t lock;      /* the arena lock itself */
    usptr_t *arena;    /* arena the lock was allocated from */
};
|
190 |
|
/*
 * disable pre-emption for the LOCAL threads when calling the arena lock
 * routines
 */

/* Acquire an arena lock.  For local (non-native) threads, "interrupts"
 * are turned off around ussetlock() so the thread cannot be preempted
 * while acquiring the lock, then restored with the fast path. */
#define _PR_LOCK(lock) { \
    PRIntn _is; \
    PRThread *me = _MD_GET_ATTACHED_THREAD(); \
    if (me && !_PR_IS_NATIVE_THREAD(me)) \
        _PR_INTSOFF(_is); \
    ussetlock(lock); \
    if (me && !_PR_IS_NATIVE_THREAD(me)) \
        _PR_FAST_INTSON(_is); \
    }

/* Release an arena lock, with the same preemption guard as _PR_LOCK. */
#define _PR_UNLOCK(lock) { \
    PRIntn _is; \
    PRThread *me = _MD_GET_ATTACHED_THREAD(); \
    if (me && !_PR_IS_NATIVE_THREAD(me)) \
        _PR_INTSOFF(_is); \
    usunsetlock(lock); \
    if (me && !_PR_IS_NATIVE_THREAD(me)) \
        _PR_FAST_INTSON(_is); \
    }
|
215 |
|
/* Create / destroy a machine-dependent lock. */
NSPR_API(PRStatus) _MD_NEW_LOCK(struct _MDLock *md);
NSPR_API(void) _MD_FREE_LOCK(struct _MDLock *lockp);

#define _MD_LOCK(_lockp)   _PR_LOCK((_lockp)->lock)
#define _MD_UNLOCK(_lockp) _PR_UNLOCK((_lockp)->lock)
/* NOTE(review): per the IRIX docs uscsetlock() returns nonzero when the
 * lock WAS acquired, which would make this expression true on failure —
 * verify the intended sense against the call sites before changing. */
#define _MD_TEST_AND_LOCK(_lockp) (uscsetlock((_lockp)->lock, 1) == 0)

/* Arena lock guarding the heap (see _PR_LOCK_HEAP / _PR_UNLOCK_HEAP). */
extern ulock_t _pr_heapLock;
|
224 |
|
/* Per-thread machine-dependent state. */
struct _MDThread {
    jmp_buf jb;                  /* saved context for setjmp/longjmp switches */
    usptr_t *pollsem_arena;      /* arena holding the pollable semaphore */
    usema_t *cvar_pollsem;       /* pollable semaphore used for cvar waits */
    PRInt32 cvar_pollsemfd;      /* fd of cvar_pollsem — presumably for select(); confirm */
    PRInt32 cvar_pollsem_select; /* acquire sem by calling select */
    PRInt32 cvar_wait;           /* if 1, thread is waiting on cvar Q */
    PRInt32 id;
    PRInt32 suspending_id;
    int errcode;                 /* saved errno (see _MD_SAVE_ERRNO) */
};
|
236 |
|
/* No machine-dependent per-stack state is needed. */
struct _MDThreadStack {
    PRInt8 notused;    /* placeholder so the struct is non-empty */
};

/* Machine-dependent semaphore: a single IRIX arena semaphore. */
struct _MDSemaphore {
    usema_t *sem;
};

/* Machine-dependent condition-variable state. */
struct _MDCVar {
    ulock_t mdcvar_lock;
};

/* No machine-dependent per-segment state is needed. */
struct _MDSegment {
    PRInt8 notused;    /* placeholder so the struct is non-empty */
};
|
252 |
|
/*
 * md-specific cpu structure field
 */
#define _PR_MD_MAX_OSFD FD_SETSIZE

/* Per-CPU I/O queue state shared with the generic Unix code. */
struct _MDCPU_Unix {
    PRCList ioQ;           /* list of pending I/O requests on this CPU */
    PRUint32 ioq_timeout;  /* timeout associated with the queue — TODO confirm units */
    PRInt32 ioq_max_osfd;  /* highest OS fd currently in the queue */
    PRInt32 ioq_osfd_cnt;  /* number of fds currently in the queue */
#ifndef _PR_USE_POLL
    /* select()-based path: interest sets plus per-fd reference counts. */
    fd_set fd_read_set, fd_write_set, fd_exception_set;
    PRInt16 fd_read_cnt[_PR_MD_MAX_OSFD],fd_write_cnt[_PR_MD_MAX_OSFD],
            fd_exception_cnt[_PR_MD_MAX_OSFD];
#else
    /* poll()-based path (the default here — _PR_USE_POLL is defined above). */
    struct pollfd *ioq_pollfds;   /* growable pollfd array */
    int ioq_pollfds_size;         /* capacity of ioq_pollfds */
#endif  /* _PR_USE_POLL */
};
|
272 |
|
/* Accessors for the per-CPU I/O-queue state in struct _MDCPU_Unix. */
#define _PR_IOQ(_cpu)               ((_cpu)->md.md_unix.ioQ)
#define _PR_ADD_TO_IOQ(_pq, _cpu)   PR_APPEND_LINK(&_pq.links, &_PR_IOQ(_cpu))
#define _PR_FD_READ_SET(_cpu)       ((_cpu)->md.md_unix.fd_read_set)
#define _PR_FD_READ_CNT(_cpu)       ((_cpu)->md.md_unix.fd_read_cnt)
#define _PR_FD_WRITE_SET(_cpu)      ((_cpu)->md.md_unix.fd_write_set)
#define _PR_FD_WRITE_CNT(_cpu)      ((_cpu)->md.md_unix.fd_write_cnt)
#define _PR_FD_EXCEPTION_SET(_cpu)  ((_cpu)->md.md_unix.fd_exception_set)
#define _PR_FD_EXCEPTION_CNT(_cpu)  ((_cpu)->md.md_unix.fd_exception_cnt)
#define _PR_IOQ_TIMEOUT(_cpu)       ((_cpu)->md.md_unix.ioq_timeout)
#define _PR_IOQ_MAX_OSFD(_cpu)      ((_cpu)->md.md_unix.ioq_max_osfd)
#define _PR_IOQ_OSFD_CNT(_cpu)      ((_cpu)->md.md_unix.ioq_osfd_cnt)
#define _PR_IOQ_POLLFDS(_cpu)       ((_cpu)->md.md_unix.ioq_pollfds)
#define _PR_IOQ_POLLFDS_SIZE(_cpu)  ((_cpu)->md.md_unix.ioq_pollfds_size)

/* Initial capacity of the per-CPU pollfd array. */
#define _PR_IOQ_MIN_POLLFDS_SIZE(_cpu) 32
|
288 |
|
289 |
|
/* Per-virtual-CPU machine-dependent state. */
struct _MDCPU {
    PRInt32 id;
    PRInt32 suspending_id;
    struct _MDCPU_Unix md_unix;   /* shared Unix I/O-queue state (above) */
};
|
295 |
|
/*
** Initialize the thread context preparing it to execute _main.
**
** A jmp_buf is captured with setjmp() and then its saved SP and PC
** slots are overwritten, so that the first longjmp() into it starts
** _main on the new thread's stack.  SP is placed 64 bytes below the
** given stack top (presumably the MIPS ABI argument-save area — TODO
** confirm).  no_sched is cleared so the thread becomes schedulable.
*/
#define _MD_INIT_CONTEXT(_thread, _sp, _main, status) \
    PR_BEGIN_MACRO \
    int *jb = (_thread)->md.jb; \
    *status = PR_TRUE; \
    (void) setjmp(jb); \
    (_thread)->md.jb[JB_SP] = (int) ((_sp) - 64); \
    (_thread)->md.jb[JB_PC] = (int) _main; \
    _thread->no_sched = 0; \
    PR_END_MACRO
|
308 |
|
/*
** Switch away from the current thread context by saving its state and
** calling the thread scheduler. Reload cpu when we come back from the
** context switch because it might have changed.
*
* XXX RUNQ lock needed before clearing _PR_NO_SCHED flag, because the
* thread may be on the RUNQ?
*/
#define _MD_SWITCH_CONTEXT(_thread) \
    PR_BEGIN_MACRO \
    PR_ASSERT(_thread->no_sched); \
    if (!setjmp(_thread->md.jb)) { /* direct return: switching away */ \
        _MD_SAVE_ERRNO(_thread) \
        _MD_SET_LAST_THREAD(_thread); \
        _PR_Schedule(); \
    } else { /* longjmp return: this thread was resumed */ \
        PR_ASSERT(_MD_LAST_THREAD() !=_MD_CURRENT_THREAD()); \
        _MD_LAST_THREAD()->no_sched = 0; \
    } \
    PR_END_MACRO
|
329 |
|
/*
** Restore a thread context that was saved by _MD_SWITCH_CONTEXT or
** initialized by _MD_INIT_CONTEXT.
**
** Restores errno, installs the thread as current, marks it no_sched
** (cleared on the resumed side of _MD_SWITCH_CONTEXT), and longjmps
** into its saved context.  Does not return to the caller.
*/
#define _MD_RESTORE_CONTEXT(_newThread) \
    PR_BEGIN_MACRO \
    int *jb = (_newThread)->md.jb; \
    _MD_RESTORE_ERRNO(_newThread) \
    _MD_SET_CURRENT_THREAD(_newThread); \
    _newThread->no_sched = 1; \
    longjmp(jb, 1); \
    PR_END_MACRO
|
342 |
|
/* Initialize md state for a new thread; wakeup_parent selects whether
 * the creating thread is woken when initialization completes. */
NSPR_API(PRStatus) _MD_InitThread(struct PRThread *thread,
                                  PRBool wakeup_parent);
NSPR_API(PRStatus) _MD_InitAttachedThread(struct PRThread *thread,
                                          PRBool wakeup_parent);
#define _MD_INIT_THREAD(thread) _MD_InitThread(thread, PR_TRUE)
#define _MD_INIT_ATTACHED_THREAD(thread) \
        _MD_InitAttachedThread(thread, PR_FALSE)

NSPR_API(void) _MD_ExitThread(struct PRThread *thread);
#define _MD_EXIT_THREAD _MD_ExitThread

NSPR_API(void) _MD_SuspendThread(struct PRThread *thread);
#define _MD_SUSPEND_THREAD _MD_SuspendThread

NSPR_API(void) _MD_ResumeThread(struct PRThread *thread);
#define _MD_RESUME_THREAD _MD_ResumeThread

/* NOTE(review): the parameter is a CPU despite being named "thread". */
NSPR_API(void) _MD_SuspendCPU(struct _PRCPU *thread);
#define _MD_SUSPEND_CPU _MD_SuspendCPU

NSPR_API(void) _MD_ResumeCPU(struct _PRCPU *thread);
#define _MD_RESUME_CPU _MD_ResumeCPU

/* No machine-dependent bracketing is needed around suspend/resume-all. */
#define _MD_BEGIN_SUSPEND_ALL()
#define _MD_END_SUSPEND_ALL()
#define _MD_BEGIN_RESUME_ALL()
#define _MD_END_RESUME_ALL()

NSPR_API(void) _MD_InitLocks(void);
#define _MD_INIT_LOCKS _MD_InitLocks

NSPR_API(void) _MD_CleanThread(struct PRThread *thread);
#define _MD_CLEAN_THREAD _MD_CleanThread

/* Yield the processor; sginap(0) gives up the rest of the quantum. */
#define _MD_YIELD() sginap(0)
|
378 |
|
379 /* The _PR_MD_WAIT_LOCK and _PR_MD_WAKEUP_WAITER functions put to sleep and |
|
380 * awaken a thread which is waiting on a lock or cvar. |
|
381 */ |
|
382 NSPR_API(PRStatus) _MD_wait(struct PRThread *, PRIntervalTime timeout); |
|
383 #define _MD_WAIT _MD_wait |
|
384 |
|
385 NSPR_API(void) _PR_MD_primordial_cpu(); |
|
386 NSPR_API(void) _PR_MD_WAKEUP_PRIMORDIAL_CPU(); |
|
387 |
|
388 NSPR_API(PRStatus) _MD_WakeupWaiter(struct PRThread *); |
|
389 #define _MD_WAKEUP_WAITER _MD_WakeupWaiter |
|
390 |
|
391 NSPR_API(void ) _MD_exit(PRIntn status); |
|
392 #define _MD_EXIT _MD_exit |
|
393 |
|
394 #include "prthread.h" |
|
395 |
|
/* Set the scheduling priority of a thread's machine-dependent state. */
NSPR_API(void) _MD_SetPriority(struct _MDThread *thread,
                               PRThreadPriority newPri);
#define _MD_SET_PRIORITY _MD_SetPriority

/* Create the machine-dependent execution vehicle for a new PRThread. */
NSPR_API(PRStatus) _MD_CreateThread(
    struct PRThread *thread,    /* the thread being created */
    void (*start) (void *),     /* entry point */
    PRThreadPriority priority,
    PRThreadScope scope,        /* local vs. global (native) thread */
    PRThreadState state,        /* joinable vs. unjoinable */
    PRUint32 stackSize);
#define _MD_CREATE_THREAD _MD_CreateThread

/* Machine-dependent cleanup performed just before process exit. */
extern void _MD_CleanupBeforeExit(void);
#define _MD_CLEANUP_BEFORE_EXIT _MD_CleanupBeforeExit

NSPR_API(void) _PR_MD_PRE_CLEANUP(PRThread *me);
|
414 |
|
/* The following defines the unwrapped versions of select() and poll(),
 * i.e. the raw system calls, bypassing any NSPR I/O interception. */
extern int _select(int nfds, fd_set *readfds, fd_set *writefds,
                   fd_set *exceptfds, struct timeval *timeout);
#define _MD_SELECT _select

#include <stropts.h>
#include <poll.h>
#define _MD_POLL _poll
extern int _poll(struct pollfd *fds, unsigned long nfds, int timeout);


/* This platform supports binding threads to processors. */
#define HAVE_THREAD_AFFINITY 1

/* Fill in the affinity mask; the thread argument is unused. */
NSPR_API(PRInt32) _MD_GetThreadAffinityMask(PRThread *unused, PRUint32 *mask);
#define _MD_GETTHREADAFFINITYMASK _MD_GetThreadAffinityMask

/* Per-CPU initialization when a new virtual CPU starts running. */
NSPR_API(void) _MD_InitRunningCPU(struct _PRCPU *cpu);
#define _MD_INIT_RUNNING_CPU _MD_InitRunningCPU
|
434 #endif /* defined(_PR_PTHREADS) */ |
|
435 |
|
436 #endif /* _LANGUAGE_ASSEMBLY */ |
|
437 |
|
438 #endif /* nspr_irix_defs_h___ */ |