Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 4 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | #include "primpl.h" |
michael@0 | 7 | |
/* List of free stack virtual memory chunks */
/* Lock guarding _pr_freeStacks and _pr_numFreeStacks. */
PRLock *_pr_stackLock;
/* Circular list of cached, currently-unused thread stacks. */
PRCList _pr_freeStacks = PR_INIT_STATIC_CLIST(&_pr_freeStacks);
/* Number of stacks currently on the free list. */
PRIntn _pr_numFreeStacks;
/* Cache limit: _PR_NewStack trims the free list down toward this count. */
PRIntn _pr_maxFreeStacks = 4;

#ifdef DEBUG
/*
** A variable that can be set via the debugger...
*/
PRBool _pr_debugStacks = PR_FALSE;
#endif

/* How much space to leave between the stacks, at each end */
/* NOTE(review): assumes _pr_pageShift is log2(page size), making this
** two pages of guard space — TODO confirm against primpl.h. */
#define REDZONE (2 << _pr_pageShift)

/* Recover the enclosing PRThreadStack from a pointer to its embedded
** PRCList "links" member (classic containerof idiom). */
#define _PR_THREAD_STACK_PTR(_qp) \
    ((PRThreadStack*) ((char*) (_qp) - offsetof(PRThreadStack,links)))
michael@0 | 26 | |
michael@0 | 27 | void _PR_InitStacks(void) |
michael@0 | 28 | { |
michael@0 | 29 | _pr_stackLock = PR_NewLock(); |
michael@0 | 30 | } |
michael@0 | 31 | |
michael@0 | 32 | void _PR_CleanupStacks(void) |
michael@0 | 33 | { |
michael@0 | 34 | if (_pr_stackLock) { |
michael@0 | 35 | PR_DestroyLock(_pr_stackLock); |
michael@0 | 36 | _pr_stackLock = NULL; |
michael@0 | 37 | } |
michael@0 | 38 | } |
michael@0 | 39 | |
michael@0 | 40 | /* |
michael@0 | 41 | ** Allocate a stack for a thread. |
michael@0 | 42 | */ |
/*
** Allocate a stack for a thread.
**
** First trims the cached free-stack list down toward _pr_maxFreeStacks
** entries, then tries to recycle a cached stack that is big enough;
** only when no cached stack fits is a fresh VM segment allocated.
**
** stackSize: requested usable stack size in bytes, exclusive of the
**            two REDZONE guard areas.
** Returns:   a fully initialized PRThreadStack, or NULL on allocation
**            or segment failure.
*/
PRThreadStack *_PR_NewStack(PRUint32 stackSize)
{
    PRCList *qp;
    PRThreadStack *ts;
    PRThread *thr;

    /*
    ** Trim the list of free stacks. Trim it backwards, tossing out the
    ** oldest stack found first (this way more recent stacks have a
    ** chance of being present in the data cache).
    */
    PR_Lock(_pr_stackLock);
    qp = _pr_freeStacks.prev;
    while ((_pr_numFreeStacks > _pr_maxFreeStacks) && (qp != &_pr_freeStacks)) {
        ts = _PR_THREAD_STACK_PTR(qp);
        /* Recover the thread that last owned this stack; its no_sched
        ** flag tells whether the stack may still be executing. */
        thr = _PR_THREAD_STACK_TO_PTR(ts);
        /* Advance the iterator BEFORE any unlink so it stays valid. */
        qp = qp->prev;
        /*
         * skip stacks which are still being used
         */
        if (thr->no_sched)
            continue;
        PR_REMOVE_LINK(&ts->links);

        /* Give platform OS to clear out the stack for debugging */
        _PR_MD_CLEAR_STACK(ts);

        _pr_numFreeStacks--;
        _PR_DestroySegment(ts->seg);
        PR_DELETE(ts);
    }

    /*
    ** Find a free thread stack. This searches the list of free'd up
    ** virtually mapped thread stacks.
    */
    qp = _pr_freeStacks.next;
    ts = 0;
    while (qp != &_pr_freeStacks) {
        ts = _PR_THREAD_STACK_PTR(qp);
        thr = _PR_THREAD_STACK_TO_PTR(ts);
        qp = qp->next;
        /*
         * skip stacks which are still being used
         */
        if ((!(thr->no_sched)) && ((ts->allocSize - 2*REDZONE) >= stackSize)) {
            /*
            ** Found a stack that is not in use and is big enough. Change
            ** stackSize to fit it.
            */
            stackSize = ts->allocSize - 2*REDZONE;
            PR_REMOVE_LINK(&ts->links);
            _pr_numFreeStacks--;
            /* Fully detach the links so the recycled stack no longer
            ** appears to be on the free list. */
            ts->links.next = 0;
            ts->links.prev = 0;
            /* Drop the lock before the common tail; no shared state is
            ** touched past this point. */
            PR_Unlock(_pr_stackLock);
            goto done;
        }
        ts = 0;
    }
    PR_Unlock(_pr_stackLock);

    if (!ts) {
        /* Make a new thread stack object. */
        ts = PR_NEWZAP(PRThreadStack);
        if (!ts) {
            return NULL;
        }

        /*
        ** Assign some of the virtual space to the new stack object. We
        ** may not get that piece of VM, but if nothing else we will
        ** advance the pointer so we don't collide (unless the OS screws
        ** up).
        */
        ts->allocSize = stackSize + 2*REDZONE;
        ts->seg = _PR_NewSegment(ts->allocSize, 0);
        if (!ts->seg) {
            PR_DELETE(ts);
            return NULL;
        }
    }

done:
    /* Common tail for both the recycled and freshly-allocated cases:
    ** (re)compute the usable stack boundaries inside the segment. */
    ts->allocBase = (char*)ts->seg->vaddr;
    ts->flags = _PR_STACK_MAPPED;
    ts->stackSize = stackSize;

#ifdef HAVE_STACK_GROWING_UP
    ts->stackTop = ts->allocBase + REDZONE;
    ts->stackBottom = ts->stackTop + stackSize;
#else
    ts->stackBottom = ts->allocBase + REDZONE;
    ts->stackTop = ts->stackBottom + stackSize;
#endif

    /* NOTE(review): %x with pointer arguments is a format mismatch on
    ** LP64 platforms; %p would be correct. Debug logging only. */
    PR_LOG(_pr_thread_lm, PR_LOG_NOTICE,
           ("thread stack: base=0x%x limit=0x%x bottom=0x%x top=0x%x\n",
            ts->allocBase, ts->allocBase + ts->allocSize - 1,
            ts->allocBase + REDZONE,
            ts->allocBase + REDZONE + stackSize - 1));

    _PR_MD_INIT_STACK(ts,REDZONE);

    return ts;
}
michael@0 | 149 | |
michael@0 | 150 | /* |
michael@0 | 151 | ** Free the stack for the current thread |
michael@0 | 152 | */ |
michael@0 | 153 | void _PR_FreeStack(PRThreadStack *ts) |
michael@0 | 154 | { |
michael@0 | 155 | if (!ts) { |
michael@0 | 156 | return; |
michael@0 | 157 | } |
michael@0 | 158 | if (ts->flags & _PR_STACK_PRIMORDIAL) { |
michael@0 | 159 | PR_DELETE(ts); |
michael@0 | 160 | return; |
michael@0 | 161 | } |
michael@0 | 162 | |
michael@0 | 163 | /* |
michael@0 | 164 | ** Put the stack on the free list. This is done because we are still |
michael@0 | 165 | ** using the stack. Next time a thread is created we will trim the |
michael@0 | 166 | ** list down; it's safe to do it then because we will have had to |
michael@0 | 167 | ** context switch to a live stack before another thread can be |
michael@0 | 168 | ** created. |
michael@0 | 169 | */ |
michael@0 | 170 | PR_Lock(_pr_stackLock); |
michael@0 | 171 | PR_APPEND_LINK(&ts->links, _pr_freeStacks.prev); |
michael@0 | 172 | _pr_numFreeStacks++; |
michael@0 | 173 | PR_Unlock(_pr_stackLock); |
michael@0 | 174 | } |