nsprpub/pr/src/bthreads/btcvar.c


/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <kernel/OS.h>

#include "primpl.h"

/*
** Create a new condition variable.
**
** "lock" is the lock used to protect the condition variable.
**
** Condition variables are synchronization objects that threads can use
** to wait for some condition to occur.
**
** This may fail if memory is tight or if some operating system resource
** is low. In such cases, a NULL will be returned.
*/
PR_IMPLEMENT(PRCondVar*)
PR_NewCondVar (PRLock *lock)
{
    PRCondVar *cv = PR_NEW( PRCondVar );
    PR_ASSERT( NULL != lock );
    if( NULL != cv )
    {
        cv->lock = lock;
        cv->sem = create_sem(0, "CVSem");
        cv->handshakeSem = create_sem(0, "CVHandshake");
        cv->signalSem = create_sem( 0, "CVSignal");
        cv->signalBenCount = 0;
        cv->ns = cv->nw = 0;
        PR_ASSERT( cv->sem >= B_NO_ERROR );
        PR_ASSERT( cv->handshakeSem >= B_NO_ERROR );
        PR_ASSERT( cv->signalSem >= B_NO_ERROR );
    }
    return cv;
} /* PR_NewCondVar */

/*
** Destroy a condition variable. There must be no thread
** waiting on the condvar. The caller is responsible for guaranteeing
** that the condvar is no longer in use.
**
*/
PR_IMPLEMENT(void)
PR_DestroyCondVar (PRCondVar *cvar)
{
    status_t result = delete_sem( cvar->sem );
    PR_ASSERT( result == B_NO_ERROR );

    result = delete_sem( cvar->handshakeSem );
    PR_ASSERT( result == B_NO_ERROR );

    result = delete_sem( cvar->signalSem );
    PR_ASSERT( result == B_NO_ERROR );

    PR_DELETE( cvar );
}
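
/*
** Illustrative usage sketch (not part of the original file): a condition
** variable is always paired with the lock passed to PR_NewCondVar, and it
** must be destroyed only after no thread can still be waiting on it. The
** surrounding names are hypothetical; error handling is abbreviated.
**
**     PRLock *ml = PR_NewLock();
**     PRCondVar *cv = PR_NewCondVar(ml);
**     if (NULL == cv) {
**         // creation can fail when memory or semaphores are exhausted
**     }
**     // ... use cv while holding ml ...
**     PR_DestroyCondVar(cv);
**     PR_DestroyLock(ml);
*/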

/*
** The thread that waits on a condition is blocked in a "waiting on
** condition" state until another thread notifies the condition or a
** caller-specified amount of time expires. The lock associated with
** the condition variable will be released; it must have been held
** prior to the call to wait.
**
** Logically a notified thread is moved from the "waiting on condition"
** state and made "ready." When scheduled, it will attempt to reacquire
** the lock that it held when wait was called.
**
** The timeout has two well-known values, PR_INTERVAL_NO_TIMEOUT and
** PR_INTERVAL_NO_WAIT. The former value requires that a condition be
** notified (or the thread interrupted) before it will resume from the
** wait. If the timeout has a value of PR_INTERVAL_NO_WAIT, the effect
** is to release the lock, possibly causing a rescheduling within the
** runtime, then immediately attempting to reacquire the lock and resume.
**
** Any other value for timeout will cause the thread to be rescheduled
** either due to explicit notification or an expired interval. The latter
** must be determined by treating time as one part of the monitored data
** being protected by the lock and tested explicitly for an expired
** interval.
**
** Returns PR_FAILURE if the caller has not locked the lock associated
** with the condition variable or the thread was interrupted (PR_Interrupt()).
** The particular reason can be extracted with PR_GetError().
*/
PR_IMPLEMENT(PRStatus)
PR_WaitCondVar (PRCondVar *cvar, PRIntervalTime timeout)
{
    status_t err;
    if( timeout == PR_INTERVAL_NO_WAIT )
    {
        /* Release and immediately reacquire the lock, which may allow
         * another thread to be scheduled in between. */
        PR_Unlock( cvar->lock );
        PR_Lock( cvar->lock );
        return PR_SUCCESS;
    }

    /* Enter the benaphore protecting the waiter/signal counts. */
    if( atomic_add( &cvar->signalBenCount, 1 ) > 0 )
    {
        if (acquire_sem(cvar->signalSem) == B_INTERRUPTED)
        {
            atomic_add( &cvar->signalBenCount, -1 );
            return PR_FAILURE;
        }
    }
    cvar->nw += 1;  /* one more waiter */
    if( atomic_add( &cvar->signalBenCount, -1 ) > 1 )
    {
        release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE);
    }

    /* Wait outside the lock, either indefinitely or for the given interval. */
    PR_Unlock( cvar->lock );
    if( timeout==PR_INTERVAL_NO_TIMEOUT )
    {
        err = acquire_sem(cvar->sem);
    }
    else
    {
        err = acquire_sem_etc(cvar->sem, 1, B_RELATIVE_TIMEOUT, PR_IntervalToMicroseconds(timeout) );
    }

    /* Re-enter the benaphore, acknowledge a pending signal if any, and
     * remove ourselves from the waiter count. */
    if( atomic_add( &cvar->signalBenCount, 1 ) > 0 )
    {
        while (acquire_sem(cvar->signalSem) == B_INTERRUPTED);
    }

    if (cvar->ns > 0)
    {
        release_sem_etc(cvar->handshakeSem, 1, B_DO_NOT_RESCHEDULE);
        cvar->ns -= 1;
    }
    cvar->nw -= 1;
    if( atomic_add( &cvar->signalBenCount, -1 ) > 1 )
    {
        release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE);
    }

    PR_Lock( cvar->lock );
    if(err!=B_NO_ERROR)
    {
        return PR_FAILURE;
    }
    return PR_SUCCESS;
}
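
/*
** Illustrative wait loop (not from the original file): because a wait can
** return early or time out, callers normally re-test their own predicate
** and, for bounded waits, track the deadline themselves, as described in
** the comment above. The 'queue' structure and its fields are hypothetical.
**
**     PR_Lock(queue->lock);
**     while (!queue->nonempty) {
**         if (PR_WaitCondVar(queue->notEmpty, PR_INTERVAL_NO_TIMEOUT)
**                 == PR_FAILURE) {
**             break;  // interrupted; PR_GetError() gives the reason
**         }
**     }
**     // ... consume an item while still holding queue->lock ...
**     PR_Unlock(queue->lock);
*/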

/*
** Notify ONE thread that is currently waiting on 'cvar'. Which thread is
** notified depends on the implementation of the runtime. Common sense would
** dictate that all threads waiting on a single condition have identical
** semantics, therefore which one gets notified is not significant.
**
** The calling thread must hold the lock that protects the condition, as
** well as the invariants that are tightly bound to the condition, when
** notify is called.
**
** Returns PR_FAILURE if the caller has not locked the lock associated
** with the condition variable.
*/
PR_IMPLEMENT(PRStatus)
PR_NotifyCondVar (PRCondVar *cvar)
{
    status_t err;
    /* Enter the benaphore protecting the waiter/signal counts. */
    if( atomic_add( &cvar->signalBenCount, 1 ) > 0 )
    {
        if (acquire_sem(cvar->signalSem) == B_INTERRUPTED)
        {
            atomic_add( &cvar->signalBenCount, -1 );
            return PR_FAILURE;
        }
    }
    if (cvar->nw > cvar->ns)
    {
        /* At least one waiter has not been signalled yet: wake one and
         * wait for it to acknowledge via the handshake semaphore. */
        cvar->ns += 1;
        release_sem_etc(cvar->sem, 1, B_DO_NOT_RESCHEDULE);
        if( atomic_add( &cvar->signalBenCount, -1 ) > 1 )
        {
            release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE);
        }

        while (acquire_sem(cvar->handshakeSem) == B_INTERRUPTED)
        {
            err = B_INTERRUPTED;
        }
    }
    else
    {
        if( atomic_add( &cvar->signalBenCount, -1 ) > 1 )
        {
            release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE);
        }
    }
    return PR_SUCCESS;
}
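
/*
** Illustrative notify sketch (not from the original file): the state change
** and the notification both happen while holding the lock that protects the
** condition. The 'queue' names are hypothetical.
**
**     PR_Lock(queue->lock);
**     queue->nonempty = PR_TRUE;
**     PR_NotifyCondVar(queue->notEmpty);   // wakes at most one waiter
**     PR_Unlock(queue->lock);
*/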

/*
** Notify all of the threads waiting on the condition variable. The order
** in which the threads are notified is indeterminate. The lock that protects
** the condition must be held.
**
** Returns PR_FAILURE if the caller has not locked the lock associated
** with the condition variable.
*/
PR_IMPLEMENT(PRStatus)
PR_NotifyAllCondVar (PRCondVar *cvar)
{
    int32 handshakes;
    status_t err = B_OK;

    /* Enter the benaphore protecting the waiter/signal counts. */
    if( atomic_add( &cvar->signalBenCount, 1 ) > 0 )
    {
        if (acquire_sem(cvar->signalSem) == B_INTERRUPTED)
        {
            atomic_add( &cvar->signalBenCount, -1 );
            return PR_FAILURE;
        }
    }

    if (cvar->nw > cvar->ns)
    {
        /* Wake every waiter that has not been signalled yet, then wait for
         * the same number of handshake acknowledgements. */
        handshakes = cvar->nw - cvar->ns;
        cvar->ns = cvar->nw;
        release_sem_etc(cvar->sem, handshakes, B_DO_NOT_RESCHEDULE);
        if( atomic_add( &cvar->signalBenCount, -1 ) > 1 )
        {
            release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE);
        }

        while (acquire_sem_etc(cvar->handshakeSem, handshakes, 0, 0) == B_INTERRUPTED)
        {
            err = B_INTERRUPTED;
        }
    }
    else
    {
        if( atomic_add( &cvar->signalBenCount, -1 ) > 1 )
        {
            release_sem_etc(cvar->signalSem, 1, B_DO_NOT_RESCHEDULE);
        }
    }
    return PR_SUCCESS;
}
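
/*
** Illustrative broadcast sketch (not from the original file): notifying all
** waiters is typically used when a state change may satisfy more than one
** of them, for example on shutdown. The 'queue' names are hypothetical.
**
**     PR_Lock(queue->lock);
**     queue->shuttingDown = PR_TRUE;
**     PR_NotifyAllCondVar(queue->notEmpty);   // wakes every current waiter
**     PR_Unlock(queue->lock);
*/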
