/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO In the future, might want to not use i386 specific assembly.
 *    The options include:
 *       - implement them generically (but maybe not truly atomic?) in userspace
 *       - have ifdef's for __Userspace_arch_ perhaps (OS isn't enough...)
 */
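
/*
 * Illustrative sketch only (never compiled): one way to follow up on the TODO
 * above without i386-specific assembly would be the C11 <stdatomic.h>
 * interface.  The names below mirror the macros defined later in this file,
 * but this block is an assumed example, not part of the build.
 */
#if 0 /* example: generic C11 implementation sketch */
#include <stdatomic.h>

/* Atomically add/subtract V to/from *P, discarding the previous value. */
#define atomic_add_int(P, V)      (void) atomic_fetch_add((_Atomic int *)(P), (V))
#define atomic_subtract_int(P, V) (void) atomic_fetch_sub((_Atomic int *)(P), (V))

/* Atomically add v to *p and return the previous value of *p. */
#define atomic_fetchadd_int(p, v) atomic_fetch_add((_Atomic int *)(p), (v))

/* Compare-and-set: if (*dst == exp) *dst = src; returns non-zero on success. */
static inline int
atomic_cmpset_int(_Atomic int *dst, int exp, int src)
{
	return atomic_compare_exchange_strong(dst, &exp, src);
}
#endif /* example: generic C11 implementation sketch */
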
#include <stdio.h>
#include <sys/types.h>

#if defined(__Userspace_os_Darwin) || defined (__Userspace_os_Windows)
#if defined (__Userspace_os_Windows)
/* InterlockedExchangeAdd() returns the value *addr held before the addition. */
#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr,-((LONG)val))
/* Note: InterlockedCompareExchange() returns the initial value of *dst, not a 0/non-zero flag. */
#define atomic_cmpset_int(dst, exp, src) InterlockedCompareExchange((LPLONG)dst, src, exp)
/* The old value was 1, so the counter has just reached 0. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
/* OSAtomicAdd32Barrier() returns the value *addr holds after the addition. */
#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-(val), (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
/* atomic_fetchadd_int() returns the new value here, so 0 means the counter just hit 0. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif

#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		*addr = 0; \
	} \
}
#endif
#if defined(__Userspace_os_Windows)
static void atomic_init() {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif
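
/*
 * Typical use of the two helpers above (illustration only; the structure and
 * field names here are hypothetical and do not appear in the SCTP sources):
 * SCTP_DECREMENT_AND_CHECK_REFCOUNT() drops a reference and reports whether
 * the caller released the last one, while SCTP_SAVE_ATOMIC_DECREMENT()
 * decrements a counter that must never go below zero.
 */
#if 0 /* example usage sketch */
struct example_obj {
	int refcount;	/* hypothetical reference counter */
	int queued;	/* hypothetical pending-work counter */
};

static void
example_release(struct example_obj *obj)
{
	SCTP_SAVE_ATOMIC_DECREMENT(&obj->queued, 1);
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&obj->refcount)) {
		/* last reference dropped; the object could be freed here */
	}
}
#endif /* example usage sketch */
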
#else
/* Using gcc built-in functions for atomic memory operations
   Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
   Requires gcc version 4.1.0 or later
   compile with -march=i486
 */

/* Atomically add V to *P. */
#define atomic_add_int(P, V) (void) __sync_fetch_and_add(P, V)

/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)

/* Following explanation from src/sys/i386/include/atomic.h,
 * for atomic compare and set
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)

/* The builtin returns the old value, so 1 means the counter has just reached 0. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		*addr = 0; \
	} \
}
#endif
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
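
/*
 * Illustration of the atomic_cmpset_int() contract documented above for the
 * gcc builtin (0 on failure, non-zero on success): a retry loop that only
 * ever raises the stored value.  This is a sketch; example_atomic_max() is
 * hypothetical and not used anywhere in the SCTP sources.
 */
#if 0 /* example: compare-and-set retry loop */
static inline void
example_atomic_max(volatile u_int *addr, u_int candidate)
{
	u_int old;

	do {
		old = *addr;
		if (old >= candidate) {
			return;		/* nothing to update */
		}
		/* retry if another thread changed *addr in the meantime */
	} while (atomic_cmpset_int(addr, old, candidate) == 0);
}
#endif /* example: compare-and-set retry loop */
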
#endif

#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add incr to *P, and return the original value of *P. */
#define atomic_add_int(P, V) AO_fetch_and_add((AO_t*)P, V)

#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)

/* Atomically compare *addr to old_val, and replace *addr by new_val
   if the first comparison succeeds.  Returns nonzero if the comparison
   succeeded and *addr was updated.
 */
/* Following explanation from src/sys/i386/include/atomic.h, which
   matches that of AO_compare_and_swap above.
 *
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)

static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic */

#if 0 /* using atomic_mtx */

#include <pthread.h>

extern userland_mutex_t atomic_mtx;

#if defined (__Userspace_os_Windows)
static inline void atomic_init() {
	InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy() {
	DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock() {
	EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock() {
	LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init() {
	(void)pthread_mutex_init(&atomic_mtx, NULL);
}
static inline void atomic_destroy() {
	(void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock() {
	(void)pthread_mutex_lock(&atomic_mtx);
}
static inline void atomic_unlock() {
	(void)pthread_mutex_unlock(&atomic_mtx);
}
#endif

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#define MPLOCKED	"lock ; "
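
/*
 * Sketch only: what the assembly routine below achieves could also be written
 * portably under atomic_mtx, at the cost of holding the mutex for the whole
 * read-modify-write.  The name atomic_fetchadd_int_locked() is hypothetical
 * and this variant is not used by the build.
 */
#if 0 /* example: mutex-based fetch-and-add without inline assembly */
static __inline u_int
atomic_fetchadd_int_locked(volatile void *n, u_int v)
{
	u_int *p = (u_int *) n;
	u_int prev;

	atomic_lock();
	prev = *p;		/* remember the old value */
	*p = prev + v;		/* store the updated value */
	atomic_unlock();

	return (prev);
}
#endif /* example: mutex-based fetch-and-add without inline assembly */
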
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
	int *p = (int *) n;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */
	atomic_unlock();

	return (v);
}


#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

#define atomic_add_int(P, V)	do {		\
	atomic_lock();				\
	(*(u_int *)(P) += (V));			\
	atomic_unlock();			\
} while(0)

#define atomic_subtract_int(P, V)	do {	\
	atomic_lock();				\
	(*(u_int *)(P) -= (V));			\
	atomic_unlock();			\
} while(0)

#endif /* closing #if for atomic_mtx */

#endif /* _USER_ATOMIC_H_ */