/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

_PRCPU *_pr_primordialCPU = NULL;

PRInt32 _pr_md_idle_cpus;       /* number of idle cpus */
/*
 * The idle threads in MxN models increment/decrement _pr_md_idle_cpus.
 * If _PR_HAVE_ATOMIC_OPS is not defined, they can't use the atomic
 * increment/decrement routines (which are based on PR_Lock/PR_Unlock),
 * because PR_Lock asserts that the calling thread is not an idle thread.
 * So we use a _MDLock to protect _pr_md_idle_cpus.
 */
#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#ifndef _PR_HAVE_ATOMIC_OPS
static _MDLock _pr_md_idle_cpus_lock;
#endif
#endif
PRUintn _pr_numCPU;
PRInt32 _pr_cpus_exit;
PRUint32 _pr_cpu_affinity_mask = 0;

#if !defined (_PR_GLOBAL_THREADS_ONLY)

static PRUintn _pr_cpuID;

static void PR_CALLBACK _PR_CPU_Idle(void *);

static _PRCPU *_PR_CreateCPU(void);
static PRStatus _PR_StartCPU(_PRCPU *cpu, PRThread *thread);

#if !defined(_PR_LOCAL_THREADS_ONLY)
static void _PR_RunCPU(void *arg);
#endif

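/*
 * Initialize the CPU (virtual processor) machinery.  In the pure
 * local-threads-only model the primordial CPU is started on the
 * calling thread; in the combined MxN model a separate global thread
 * is created to run it (see _PR_RunCPU below).
 */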
void _PR_InitCPUs()
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (_native_threads_only)
        return;

    _pr_cpuID = 0;
    _MD_NEW_LOCK(&_pr_cpuLock);
#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#ifndef _PR_HAVE_ATOMIC_OPS
    _MD_NEW_LOCK(&_pr_md_idle_cpus_lock);
#endif
#endif

#ifdef _PR_LOCAL_THREADS_ONLY

#ifdef HAVE_CUSTOM_USER_THREADS
    _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me);
#endif

    /* Now start the first CPU. */
    _pr_primordialCPU = _PR_CreateCPU();
    _pr_numCPU = 1;
    _PR_StartCPU(_pr_primordialCPU, me);

    _PR_MD_SET_CURRENT_CPU(_pr_primordialCPU);

    /* Initialize cpu for current thread (could be different from me) */
    _PR_MD_CURRENT_THREAD()->cpu = _pr_primordialCPU;

    _PR_MD_SET_LAST_THREAD(me);

#else /* Combined MxN model */

    _pr_primordialCPU = _PR_CreateCPU();
    _pr_numCPU = 1;
    _PR_CreateThread(PR_SYSTEM_THREAD,
                     _PR_RunCPU,
                     _pr_primordialCPU,
                     PR_PRIORITY_NORMAL,
                     PR_GLOBAL_THREAD,
                     PR_UNJOINABLE_THREAD,
                     0,
                     _PR_IDLE_THREAD);

#endif /* _PR_LOCAL_THREADS_ONLY */

    _PR_MD_INIT_CPUS();
}

#ifdef WINNT
/*
 * Right now this function merely stops the CPUs and does
 * not do any other cleanup.
 *
 * It is only implemented for WINNT because bug 161998 only
 * affects the WINNT version of NSPR, but it would be nice
 * to implement this function for other platforms too.
 */
void _PR_CleanupCPUs(void)
{
    PRUintn i;
    PRCList *qp;
    _PRCPU *cpu;

    _pr_cpus_exit = 1;
    for (i = 0; i < _pr_numCPU; i++) {
        _PR_MD_WAKEUP_WAITER(NULL);
    }
    for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
        cpu = _PR_CPU_PTR(qp);
        _PR_MD_JOIN_THREAD(&cpu->thread->md);
    }
}
#endif

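/*
 * Allocate and initialize a per-CPU queue structure: one run queue
 * per priority level, plus the sleep, pause, suspend, and
 * waiting-to-join queues, each group guarded by its own lock.
 */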
static _PRCPUQueue *_PR_CreateCPUQueue(void)
{
    PRInt32 index;
    _PRCPUQueue *cpuQueue;

    cpuQueue = PR_NEWZAP(_PRCPUQueue);
    if (!cpuQueue)
        return NULL;

    _MD_NEW_LOCK(&cpuQueue->runQLock);
    _MD_NEW_LOCK(&cpuQueue->sleepQLock);
    _MD_NEW_LOCK(&cpuQueue->miscQLock);

    for (index = 0; index < PR_PRIORITY_LAST + 1; index++)
        PR_INIT_CLIST(&(cpuQueue->runQ[index]));
    PR_INIT_CLIST(&(cpuQueue->sleepQ));
    PR_INIT_CLIST(&(cpuQueue->pauseQ));
    PR_INIT_CLIST(&(cpuQueue->suspendQ));
    PR_INIT_CLIST(&(cpuQueue->waitingToJoinQ));

    cpuQueue->numCPUs = 1;

    return cpuQueue;
}

/*
 * Create a new CPU.
 *
 * This function initializes enough of the _PRCPU structure so
 * that it can be accessed safely by a global thread or another
 * CPU. This function does not create the native thread that
 * will run the CPU nor does it initialize the parts of _PRCPU
 * that must be initialized by that native thread.
 *
 * The reason we cannot simply have the native thread create
 * and fully initialize a new CPU is that we need to be able to
 * create a usable _pr_primordialCPU in _PR_InitCPUs without
 * assuming that the primordial CPU thread we created can run
 * during NSPR initialization. For example, on Windows while
 * new threads can be created by DllMain, they won't be able
 * to run during DLL initialization. If NSPR is initialized
 * by DllMain, the primordial CPU thread won't run until DLL
 * initialization is finished.
 */
static _PRCPU *_PR_CreateCPU(void)
{
    _PRCPU *cpu;

    cpu = PR_NEWZAP(_PRCPU);
    if (cpu) {
        cpu->queue = _PR_CreateCPUQueue();
        if (!cpu->queue) {
            PR_DELETE(cpu);
            return NULL;
        }
    }
    return cpu;
}

/*
 * Start a new CPU.
 *
 * 'cpu' is a _PRCPU structure created by _PR_CreateCPU().
 * 'thread' is the native thread that will run the CPU.
 *
 * If this function fails, 'cpu' is destroyed.
 */
static PRStatus _PR_StartCPU(_PRCPU *cpu, PRThread *thread)
{
    /*
    ** Start a new cpu. The assumption this code makes is that the
    ** underlying operating system creates a stack to go with the new
    ** native thread. That stack will be used by the cpu when pausing.
    */

    PR_ASSERT(!_native_threads_only);

    cpu->last_clock = PR_IntervalNow();

    /* Before we create any threads on this CPU we have to
     * set the current CPU
     */
    _PR_MD_SET_CURRENT_CPU(cpu);
    _PR_MD_INIT_RUNNING_CPU(cpu);
    thread->cpu = cpu;

    cpu->idle_thread = _PR_CreateThread(PR_SYSTEM_THREAD,
                                        _PR_CPU_Idle,
                                        (void *)cpu,
                                        PR_PRIORITY_NORMAL,
                                        PR_LOCAL_THREAD,
                                        PR_UNJOINABLE_THREAD,
                                        0,
                                        _PR_IDLE_THREAD);

    if (!cpu->idle_thread) {
        /* XXXMB: cpu->queue and its locks are leaked here */
        PR_DELETE(cpu);
        return PR_FAILURE;
    }
    PR_ASSERT(cpu->idle_thread->cpu == cpu);

    cpu->idle_thread->no_sched = 0;

    cpu->thread = thread;

    if (_pr_cpu_affinity_mask)
        PR_SetThreadAffinityMask(thread, _pr_cpu_affinity_mask);

    /* Created and started a new CPU */
    _PR_CPU_LIST_LOCK();
    cpu->id = _pr_cpuID++;
    PR_APPEND_LINK(&cpu->links, &_PR_CPUQ());
    _PR_CPU_LIST_UNLOCK();

    return PR_SUCCESS;
}

#if !defined(_PR_GLOBAL_THREADS_ONLY) && !defined(_PR_LOCAL_THREADS_ONLY)
/*
** This code is used during a cpu's initial creation.
*/
static void _PR_RunCPU(void *arg)
{
    _PRCPU *cpu = (_PRCPU *)arg;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(NULL != me);

    /*
     * _PR_StartCPU calls _PR_CreateThread to create the
     * idle thread. Because _PR_CreateThread calls PR_Lock,
     * the current thread has to remain a global thread
     * during the _PR_StartCPU call so that it can wait for
     * the lock if the lock is held by another thread. If
     * we cleared the _PR_GLOBAL_SCOPE flag in
     * _PR_MD_CREATE_PRIMORDIAL_USER_THREAD, the current
     * thread would be treated as a local thread and have
     * trouble waiting for the lock because the CPU is not
     * fully constructed yet.
     *
     * After the CPU is started, it is safe to mark the
     * current thread as a local thread.
     */

#ifdef HAVE_CUSTOM_USER_THREADS
    _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me);
#endif

    me->no_sched = 1;
    _PR_StartCPU(cpu, me);

#ifdef HAVE_CUSTOM_USER_THREADS
    me->flags &= (~_PR_GLOBAL_SCOPE);
#endif

    _PR_MD_SET_CURRENT_CPU(cpu);
    _PR_MD_SET_CURRENT_THREAD(cpu->thread);
    me->cpu = cpu;

    while (1) {
        PRInt32 is;
        if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);
        _PR_MD_START_INTERRUPTS();
        _PR_MD_SWITCH_CONTEXT(me);
    }
}
#endif

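/*
 * The idle thread that runs on each CPU.  Each pass through the loop
 * computes how long the CPU may pause: no wait if the run queue has a
 * ready thread, otherwise until the earliest sleeper's timeout (or
 * indefinitely if the sleep queue is empty).  It then pauses in
 * _PR_MD_PAUSE_CPU waiting for I/O, delivers the clock interrupt, and
 * switches context so a runnable thread can be scheduled.
 */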
static void PR_CALLBACK _PR_CPU_Idle(void *_cpu)
{
    _PRCPU *cpu = (_PRCPU *)_cpu;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(NULL != me);

    me->cpu = cpu;
    cpu->idle_thread = me;
    if (_MD_LAST_THREAD())
        _MD_LAST_THREAD()->no_sched = 0;
    if (!_PR_IS_NATIVE_THREAD(me)) _PR_MD_SET_INTSOFF(0);
    while (1) {
        PRInt32 is;
        PRIntervalTime timeout;
        if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);

        _PR_RUNQ_LOCK(cpu);
#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#ifdef _PR_HAVE_ATOMIC_OPS
        _PR_MD_ATOMIC_INCREMENT(&_pr_md_idle_cpus);
#else
        _PR_MD_LOCK(&_pr_md_idle_cpus_lock);
        _pr_md_idle_cpus++;
        _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
#endif /* _PR_HAVE_ATOMIC_OPS */
#endif
        /* If someone is on the runq, do a nonblocking pause */
        if (_PR_RUNQREADYMASK(me->cpu) != 0) {
            _PR_RUNQ_UNLOCK(cpu);
            timeout = PR_INTERVAL_NO_WAIT;
        } else {
            _PR_RUNQ_UNLOCK(cpu);

            _PR_SLEEPQ_LOCK(cpu);
            if (PR_CLIST_IS_EMPTY(&_PR_SLEEPQ(me->cpu))) {
                timeout = PR_INTERVAL_NO_TIMEOUT;
            } else {
                PRThread *wakeThread;
                wakeThread = _PR_THREAD_PTR(_PR_SLEEPQ(me->cpu).next);
                timeout = wakeThread->sleep;
            }
            _PR_SLEEPQ_UNLOCK(cpu);
        }

        /* Wait for an I/O to complete */
        (void)_PR_MD_PAUSE_CPU(timeout);

#ifdef WINNT
        if (_pr_cpus_exit) {
            /* _PR_CleanupCPUs tells us to exit */
            _PR_MD_END_THREAD();
        }
#endif

#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#ifdef _PR_HAVE_ATOMIC_OPS
        _PR_MD_ATOMIC_DECREMENT(&_pr_md_idle_cpus);
#else
        _PR_MD_LOCK(&_pr_md_idle_cpus_lock);
        _pr_md_idle_cpus--;
        _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
#endif /* _PR_HAVE_ATOMIC_OPS */
#endif

        _PR_ClockInterrupt();

        /* Now schedule any thread that is on the runq.
         * Interrupts must be off when switching contexts.
         */
        me->state = _PR_RUNNABLE;
        _PR_MD_SWITCH_CONTEXT(me);
        if (!_PR_IS_NATIVE_THREAD(me)) _PR_FAST_INTSON(is);
    }
}
#endif /* _PR_GLOBAL_THREADS_ONLY */

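/*
 * PR_SetConcurrency -- add virtual processors.
 *
 * In the combined MxN model, create enough new CPUs (each backed by
 * an unjoinable global thread running _PR_RunCPU) to bring the total
 * up to numCPUs.  The CPU count never shrinks.  This is a no-op in
 * the pure local or pure global thread models and when only native
 * threads are in use.
 */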
PR_IMPLEMENT(void) PR_SetConcurrency(PRUintn numCPUs)
{
#if defined(_PR_GLOBAL_THREADS_ONLY) || defined(_PR_LOCAL_THREADS_ONLY)

    /* do nothing */

#else /* combined, MxN thread model */

    PRUintn newCPU;
    _PRCPU *cpu;
    PRThread *thr;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (_native_threads_only)
        return;

    _PR_CPU_LIST_LOCK();
    if (_pr_numCPU < numCPUs) {
        newCPU = numCPUs - _pr_numCPU;
        _pr_numCPU = numCPUs;
    } else newCPU = 0;
    _PR_CPU_LIST_UNLOCK();

    for (; newCPU; newCPU--) {
        cpu = _PR_CreateCPU();
        if (!cpu)
            break;  /* allocation failed; stop adding CPUs */
        thr = _PR_CreateThread(PR_SYSTEM_THREAD,
                               _PR_RunCPU,
                               cpu,
                               PR_PRIORITY_NORMAL,
                               PR_GLOBAL_THREAD,
                               PR_UNJOINABLE_THREAD,
                               0,
                               _PR_IDLE_THREAD);
    }
#endif
}

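/*
 * Return the primordial CPU, falling back to the calling thread's
 * current CPU if the primordial CPU has not been set up yet.
 */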
PR_IMPLEMENT(_PRCPU *) _PR_GetPrimordialCPU(void)
{
    if (_pr_primordialCPU)
        return _pr_primordialCPU;
    else
        return _PR_MD_CURRENT_CPU();
}