netwerk/sctp/src/user_atomic.h

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:55:46 +0100
changeset   1 ca08bd8f51b2
permissions -rwxr-xr-x

Added tag TORBROWSER_REPLICA for changeset 6474c204b198

/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO In the future, we might want to stop using i386-specific assembly.
 * The options include:
 * - implement them generically (but maybe not truly atomic?) in userspace
 * - have ifdefs for __Userspace_arch_ perhaps (OS isn't enough...)
 */
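/*
 * A minimal sketch of one such generic implementation, assuming a C11
 * compiler with <stdatomic.h>. This block is illustrative only and is not
 * part of the build; atomic_cmpset_int_c11() is a hypothetical helper.
 */
#if 0 /* hypothetical C11 variant */
#include <stdatomic.h>

#define atomic_add_int(P, V)      (void)atomic_fetch_add((volatile _Atomic int *)(P), (V))
#define atomic_subtract_int(P, V) (void)atomic_fetch_sub((volatile _Atomic int *)(P), (V))
#define atomic_fetchadd_int(P, V) atomic_fetch_add((volatile _Atomic int *)(P), (V))

/* atomic_compare_exchange_strong() writes the current value back into its
 * second argument on failure, so 'expected' must be a local copy. */
static inline int
atomic_cmpset_int_c11(volatile int *dst, int exp, int src)
{
	int expected = exp;

	return atomic_compare_exchange_strong((volatile _Atomic int *)dst,
	                                      &expected, src);
}
#endif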

#include <stdio.h>
#include <sys/types.h>

#if defined(__Userspace_os_Darwin) || defined (__Userspace_os_Windows)
#if defined (__Userspace_os_Windows)
/* InterlockedExchangeAdd() returns the *previous* value of *addr, which is
 * why SCTP_DECREMENT_AND_CHECK_REFCOUNT() compares against 1 here. Note
 * also that InterlockedCompareExchange() returns the initial value of *dst,
 * not the 0/non-zero success flag that the gcc built-in variant below
 * returns. */
#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr,-((LONG)val))
#define atomic_cmpset_int(dst, exp, src) InterlockedCompareExchange((LPLONG)dst, src, exp)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
/* OSAtomicAdd32Barrier() returns the *new* value of *addr, so the refcount
 * check compares against 0 instead. */
#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-val, (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif

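/*
 * A minimal usage sketch (not part of the original header): how the
 * SCTP_DECREMENT_AND_CHECK_REFCOUNT() macro above is meant to be used.
 * It is true exactly when the caller dropped the last reference.
 * 'struct my_object' and object_release() are hypothetical names for
 * illustration only.
 */
#if 0 /* hypothetical usage sketch */
#include <stdlib.h>

struct my_object {
	int refcount;		/* managed with the atomics above */
	/* ... payload ... */
};

static void
object_release(struct my_object *obj)
{
	/* Only the thread that drops the count from 1 to 0 frees it. */
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&obj->refcount)) {
		free(obj);
	}
}
#endif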
/* Note: on this branch atomic_fetchadd_int() returns the new value on
 * Darwin but the previous value on Windows, so 'newval' below is only
 * truly the post-decrement value on Darwin. */
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		*addr = 0; \
	} \
}
#if defined(__Userspace_os_Windows)
static void atomic_init() {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif
#endif

#else
/* Using gcc built-in functions for atomic memory operations.
 * Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
 * Requires gcc version 4.1.0 or later; on i386, compile with -march=i486
 * or higher so that the required instructions are available.
 */

/* Atomically add V to *P. */
#define atomic_add_int(P, V) (void) __sync_fetch_and_add(P, V)

/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)

/* Following explanation from src/sys/i386/include/atomic.h,
 * for atomic compare and set:
 *
 *	if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */
#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)

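/*
 * A minimal usage sketch (not part of the original header): the classic
 * retry loop built on the boolean compare-and-set above, here performing
 * a capped increment. atomic_increment_capped() is a hypothetical helper
 * for illustration only.
 */
#if 0 /* hypothetical usage sketch */
static inline void
atomic_increment_capped(volatile int *p, int cap)
{
	int old;

	do {
		old = *p;
		if (old >= cap)
			return;	/* already at the cap, nothing to do */
	} while (!atomic_cmpset_int(p, old, old + 1));
}
#endif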
/* __sync_fetch_and_add() returns the previous value, so comparing against 1
 * detects the transition to zero. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		*addr = 0; \
	} \
}
#endif
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif

#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add V to *P, and return the original value of *P. */
#define atomic_add_int(P, V) AO_fetch_and_add((AO_t*)P, V)

#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)

/* Atomically compare *addr to old_val, and replace *addr by new_val
 * if the first comparison succeeds. Returns nonzero if the comparison
 * succeeded and *addr was updated.
 */
/* Following explanation from src/sys/i386/include/atomic.h, which
 * matches that of AO_compare_and_swap above:
 *
 *	Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == exp) *dst = src (all 32 bit words)
 *
 *	Returns 0 on failure, non-zero on success.
 */
#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)

static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic */

#if 0 /* using atomic_mtx */

#include <pthread.h>

extern userland_mutex_t atomic_mtx;

#if defined (__Userspace_os_Windows)
static inline void atomic_init() {
	InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy() {
	DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock() {
	EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock() {
	LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init() {
	(void)pthread_mutex_init(&atomic_mtx, NULL);
}
static inline void atomic_destroy() {
	(void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock() {
	(void)pthread_mutex_lock(&atomic_mtx);
}
static inline void atomic_unlock() {
	(void)pthread_mutex_unlock(&atomic_mtx);
}
#endif
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */

#define MPLOCKED "lock ; "

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
	int *p = (int *) n;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */
	atomic_unlock();

	return (v);
}

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

#define atomic_add_int(P, V) do {	\
	atomic_lock();			\
	(*(u_int *)(P) += (V));		\
	atomic_unlock();		\
} while (0)
#define atomic_subtract_int(P, V) do {	\
	atomic_lock();			\
	(*(u_int *)(P) -= (V));		\
	atomic_unlock();		\
} while (0)
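
/*
 * A minimal sketch (not part of the original header): under the mutex
 * variant, the macros above are safe only because every access goes
 * through atomic_mtx, so even a plain read should take the same lock to
 * observe a consistent value. atomic_load_int() is a hypothetical helper
 * for illustration only.
 */
#if 0 /* hypothetical usage sketch */
static inline u_int
atomic_load_int(volatile u_int *p)
{
	u_int v;

	atomic_lock();
	v = *p;
	atomic_unlock();
	return (v);
}
#endif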

#endif /* closing #if for atomic_mtx */
#endif /* _USER_ATOMIC_H_ */
