netwerk/sctp/src/user_atomic.h

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/netwerk/sctp/src/user_atomic.h	Wed Dec 31 06:09:35 2014 +0100
/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO: In the future, we might want to avoid i386-specific assembly.
 *    The options include:
 *       - implement them generically (but maybe not truly atomically?) in userspace
 *       - have ifdefs for __Userspace_arch_ perhaps (the OS alone isn't enough...)
 */

#include <stdio.h>
#include <sys/types.h>

#if defined(__Userspace_os_Darwin) || defined (__Userspace_os_Windows)
#if defined (__Userspace_os_Windows)
/* InterlockedExchangeAdd() returns the previous value of *addr. */
#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, -((LONG)val))
/* InterlockedCompareExchange() returns the initial value of *dst, not a
 * boolean, so compare the result against exp to get the documented cmpset
 * semantics (0 on failure, non-zero on success). */
#define atomic_cmpset_int(dst, exp, src) ((LONG)InterlockedCompareExchange((LPLONG)dst, src, exp) == (LONG)(exp))
/* The refcount hit zero exactly when the previous value was 1. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
/* Note: OSAtomicAdd32Barrier() returns the NEW value, so this
 * atomic_fetchadd_int() differs from FreeBSD's, which returns the
 * previous value; the macros below are written for that. */
#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-val, (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif
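
/*
 * Illustrative sketch (not from the original header): the reference-count
 * pattern these macros support.  `obj', `hold' and `release' are
 * hypothetical names.
 *
 *	struct obj { int32_t refcount; };
 *
 *	static void
 *	hold(struct obj *o)
 *	{
 *		atomic_add_int(&o->refcount, 1);
 *	}
 *
 *	static void
 *	release(struct obj *o)
 *	{
 *		// true only for the thread dropping the last reference
 *		if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&o->refcount)) {
 *			free(o);
 *		}
 *	}
 */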

#if defined(INVARIANTS)
#if defined(__Userspace_os_Windows)
/* atomic_fetchadd_int() returns the previous value here. */
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -(val)); \
	if (oldval < (val)) { \
		panic("Counter goes negative"); \
	} \
}
#else
/* atomic_fetchadd_int() returns the new value here. */
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -(val)); \
	if (newval < 0) { \
		panic("Counter goes negative"); \
	} \
}
#endif
#else
#if defined(__Userspace_os_Windows)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -(val)); \
	if (oldval < (val)) { \
		*addr = 0; \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -(val)); \
	if (newval < 0) { \
		*addr = 0; \
	} \
}
#endif
#endif
#if defined(__Userspace_os_Windows)
static void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif

#else
/* Using gcc built-in functions for atomic memory operations.
   Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
   Requires gcc 4.1.0 or later; on x86, compile with -march=i486 or later.
 */

/* Atomically add V to *P. */
#define atomic_add_int(P, V)	 (void) __sync_fetch_and_add(P, V)

/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)

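/*
 * Illustrative sketch (not from the original header): because
 * atomic_fetchadd_int() returns the old value of *p, it can hand out
 * unique, ordered sequence numbers.  `next_seq' is a hypothetical helper.
 *
 *	static u_int
 *	next_seq(volatile u_int *seq)
 *	{
 *		// the old value is unique to this caller
 *		return (atomic_fetchadd_int(seq, 1));
 *	}
 */
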
/* Following explanation from src/sys/i386/include/atomic.h,
 * for atomic compare and set:
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)

/* __sync_fetch_and_add() returns the previous value, so the count hit
 * zero exactly when the old value was 1. */
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
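
/*
 * Illustrative sketch (not from the original header): the usual retry loop
 * built on atomic_cmpset_int(), here capping a counter at a hypothetical
 * `limit'.
 *
 *	static void
 *	bounded_increment(volatile u_int *counter, u_int limit)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *counter;
 *			if (old >= limit)
 *				return;
 *		} while (atomic_cmpset_int(counter, old, old + 1) == 0);
 *	}
 */
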
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -(val)); \
	if (oldval < (val)) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -(val)); \
	if (oldval < (val)) { \
		*addr = 0; \
	} \
}
#endif
static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif
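
/*
 * Illustrative sketch (not from the original header): SCTP_SAVE_ATOMIC_DECREMENT()
 * is meant for accounting counters that must never go negative, e.g.
 * releasing `len' bytes from a hypothetical queued-bytes counter:
 *
 *	SCTP_SAVE_ATOMIC_DECREMENT(&queued_bytes, len);
 */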

#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add V to *P, and return the original value of *P. */
#define atomic_add_int(P, V)	 AO_fetch_and_add((AO_t*)P, V)

#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)

/* Atomically compare *addr to old_val, and replace *addr by new_val
   if the first comparison succeeds.  Returns nonzero if the comparison
   succeeded and *addr was updated.
*/
/* Following explanation from src/sys/i386/include/atomic.h, which
   matches the description of AO_compare_and_swap above:
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)

static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic_ops */

#if 0 /* using atomic_mtx */

#if !defined(__Userspace_os_Windows)
#include <pthread.h>
#endif

extern userland_mutex_t atomic_mtx;

#if defined (__Userspace_os_Windows)
static inline void atomic_init(void) {
	InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy(void) {
	DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock(void) {
	EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock(void) {
	LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init(void) {
	(void)pthread_mutex_init(&atomic_mtx, NULL);
}
static inline void atomic_destroy(void) {
	(void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock(void) {
	(void)pthread_mutex_lock(&atomic_mtx);
}
static inline void atomic_unlock(void) {
	(void)pthread_mutex_unlock(&atomic_mtx);
}
#endif
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.  (With this atomic_mtx variant the mutex
 * already serializes callers, so the lock prefix is redundant but harmless.)
 */

#define	MPLOCKED	"lock ; "

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
	u_int *p = (u_int *) n;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */
	atomic_unlock();

	return (v);
}

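/*
 * Worked example (illustrative): if *p holds 5 and v is 3, xaddl stores
 * 8 in *p and leaves the previous value 5 in v, which is then returned.
 */
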
#ifdef CPU_DISABLE_CMPXCHG

/*
 * 80386 fallback: no cmpxchgl, so the compare-and-set is simulated by
 * disabling interrupts.  Note that cli is a privileged instruction, so
 * this kernel-inherited path would fault in userspace.
 */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */


#define atomic_add_int(P, V)	 do {   \
		atomic_lock();          \
		(*(u_int *)(P) += (V)); \
		atomic_unlock();        \
} while (0)
#define atomic_subtract_int(P, V)  do {   \
		atomic_lock();            \
		(*(u_int *)(P) -= (V));   \
		atomic_unlock();          \
} while (0)

#endif /* closing #if for atomic_mtx */
#endif /* _USER_ATOMIC_H_ */
