From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Fri, 10 Jun 2011 19:58:34 +0000 (-0400)
Subject: Headers: move uatomic_*.h to urcu/uatomic/*.h, rename uatomic_arch.h to uatomic.h
X-Git-Tag: v0.6.0~12
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=a2e7bf9ce5de5113c7f59c380b0087e291cd603d;p=userspace-rcu.git

Headers: move uatomic_*.h to urcu/uatomic/*.h, rename uatomic_arch.h to uatomic.h

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---

diff --git a/Makefile.am b/Makefile.am
index e85c924..53268e5 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -8,12 +8,12 @@ SUBDIRS = . tests
 include_HEADERS = urcu.h $(top_srcdir)/urcu-*.h
 nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h \
 	urcu/rculist.h urcu/rcuhlist.h urcu/system.h urcu/urcu-futex.h \
-	urcu/uatomic_generic.h urcu/arch/generic.h urcu/wfstack.h \
+	urcu/uatomic/generic.h urcu/arch/generic.h urcu/wfstack.h \
 	urcu/wfqueue.h urcu/rculfstack.h urcu/rculfqueue.h \
 	urcu/urcu_ref.h urcu/map/*.h urcu/static/*.h
-nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic_arch.h urcu/config.h
+nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic.h urcu/config.h
 
-EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic_arch_*.h \
+EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic/*.h \
 	gpl-2.0.txt lgpl-2.1.txt lgpl-relicensing.txt \
 	README LICENSE compat_arch_x86.c

diff --git a/compat_arch_x86.c b/compat_arch_x86.c
index 692417e..9cbd3c8 100644
--- a/compat_arch_x86.c
+++ b/compat_arch_x86.c
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 /*
  * It does not really matter if the constructor is called before using

diff --git a/configure.ac b/configure.ac
index 85123a1..a885fd9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -81,7 +81,7 @@ asm volatile("dmb":::"memory");
 	)
 fi
-UATOMICSRC=urcu/uatomic_arch_$ARCHTYPE.h
+UATOMICSRC=urcu/uatomic/$ARCHTYPE.h
 ARCHSRC=urcu/arch/$ARCHTYPE.h
 if test "x$ARCHTYPE" != xx86 -a "x$ARCHTYPE" != xppc; then
 	APISRC=tests/api_gcc.h
@@ -240,7 +240,7 @@ CFLAGS=$saved_CFLAGS
 AC_CONFIG_LINKS([
 	urcu/arch.h:$ARCHSRC
-	urcu/uatomic_arch.h:$UATOMICSRC
+	urcu/uatomic.h:$UATOMICSRC
 	tests/api.h:$APISRC
 ])
 AC_CONFIG_FILES([

diff --git a/tests/test_uatomic.c b/tests/test_uatomic.c
index 2c8c232..692060b 100644
--- a/tests/test_uatomic.c
+++ b/tests/test_uatomic.c
@@ -1,6 +1,6 @@
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 struct testvals {
 	unsigned char c;

diff --git a/tests/urcutorture.c b/tests/urcutorture.c
index a098d87..5e9b059 100644
--- a/tests/urcutorture.c
+++ b/tests/urcutorture.c
@@ -24,6 +24,6 @@
 #include
 #endif
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include
 #include "rcutorture.h"

diff --git a/urcu-defer-impl.h b/urcu-defer-impl.h
index 0aedd53..8dcd114 100644
--- a/urcu-defer-impl.h
+++ b/urcu-defer-impl.h
@@ -45,7 +45,7 @@
 #include
 #include
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include
 #include

diff --git a/urcu-pointer.c b/urcu-pointer.c
index 7dfb53a..45dad2b 100644
--- a/urcu-pointer.c
+++ b/urcu-pointer.c
@@ -24,7 +24,7 @@
  * IBM's contributions to this file may be relicensed under LGPLv2 or later.
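 *
 * For callers, the change below is purely an include-path rename; the
 * uatomic API itself is untouched. A minimal, hypothetical caller
 * sketch (illustration only, not part of this patch):
 *
 *	-#include <urcu/uatomic_arch.h>	(old path, removed by this commit)
 *	+#include <urcu/uatomic.h>	(new path)
 *
 *	static unsigned long count;
 *	uatomic_inc(&count);	(identical before and after the rename)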
  */
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #include "urcu/static/urcu-pointer.h"
 
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */

diff --git a/urcu-pointer.h b/urcu-pointer.h
index 359a99f..027a18f 100644
--- a/urcu-pointer.h
+++ b/urcu-pointer.h
@@ -28,7 +28,7 @@
 #include
 #include
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {

diff --git a/urcu/static/rculfqueue.h b/urcu/static/rculfqueue.h
index 410a4cf..75df985 100644
--- a/urcu/static/rculfqueue.h
+++ b/urcu/static/rculfqueue.h
@@ -27,7 +27,7 @@
  */
 
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include
 /* A urcu implementation header should be already included. */

diff --git a/urcu/static/rculfstack.h b/urcu/static/rculfstack.h
index 7caf3c8..99d3d4c 100644
--- a/urcu/static/rculfstack.h
+++ b/urcu/static/rculfstack.h
@@ -26,7 +26,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 /* A urcu implementation header should be already included. */
 
 #ifdef __cplusplus

diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index 14c6cfe..cca8d6a 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -37,7 +37,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include
 
 /*

diff --git a/urcu/static/urcu-pointer.h b/urcu/static/urcu-pointer.h
index b644486..acd7cee 100644
--- a/urcu/static/urcu-pointer.h
+++ b/urcu/static/urcu-pointer.h
@@ -32,7 +32,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {

diff --git a/urcu/static/urcu-qsbr.h b/urcu/static/urcu-qsbr.h
index e0b12be..f189e31 100644
--- a/urcu/static/urcu-qsbr.h
+++ b/urcu/static/urcu-qsbr.h
@@ -39,7 +39,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include
 #include

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 18e4826..2898272 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -37,7 +37,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include
 #include

diff --git a/urcu/static/wfqueue.h b/urcu/static/wfqueue.h
index 790931b..77828ca 100644
--- a/urcu/static/wfqueue.h
+++ b/urcu/static/wfqueue.h
@@ -30,7 +30,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {

diff --git a/urcu/static/wfstack.h b/urcu/static/wfstack.h
index ff18c4a..454240f 100644
--- a/urcu/static/wfstack.h
+++ b/urcu/static/wfstack.h
@@ -30,7 +30,7 @@
 #include
 #include
 #include
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {

diff --git a/urcu/uatomic/alpha.h b/urcu/uatomic/alpha.h
new file mode 100644
index 0000000..5dceb90
--- /dev/null
+++ b/urcu/uatomic/alpha.h
@@ -0,0 +1,32 @@
+#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
+#define _URCU_UATOMIC_ARCH_ALPHA_H
+
+/*
+ * Atomic exchange operations for the Alpha architecture. Let GCC do it.
+ *
+ * Copyright (c) 2010 Paolo Bonzini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
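+ *
+ * (Implementation note: "let GCC do it" is literal; this header defines
+ * no assembler of its own. Every operation comes from the GCC __sync
+ * builtins via uatomic/generic.h, included at the end of this file.
+ * For instance, with a 4-byte int x, the generic code turns
+ *
+ *	uatomic_add_return(&x, 1)
+ *
+ * into __sync_add_and_fetch_4(&x, 1).)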
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */

diff --git a/urcu/uatomic/arm.h b/urcu/uatomic/arm.h
new file mode 100644
index 0000000..e0016b8
--- /dev/null
+++ b/urcu/uatomic/arm.h
@@ -0,0 +1,43 @@
+#ifndef _URCU_ARCH_UATOMIC_ARM_H
+#define _URCU_ARCH_UATOMIC_ARM_H
+
+/*
+ * Atomics for ARM. This approach is usable on kernels back to 2.6.15.
+ *
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ *	(Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* xchg */
+#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_ARM_H */

diff --git a/urcu/uatomic/gcc.h b/urcu/uatomic/gcc.h
new file mode 100644
index 0000000..47ca195
--- /dev/null
+++ b/urcu/uatomic/gcc.h
@@ -0,0 +1,46 @@
+#ifndef _URCU_ARCH_UATOMIC_GCC_H
+#define _URCU_ARCH_UATOMIC_GCC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ *	(Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * If your platform doesn't have a full set of atomics, you will need a
+ * separate urcu/uatomic/*.h file for your architecture. Otherwise, just
+ * rely on the definitions in uatomic/generic.h.
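+ *
+ * A concrete illustration (a sketch, assuming a target where the __sync
+ * builtins exist at 1-, 2-, 4- and, on 64-bit, 8-byte widths): the two
+ * flags below make generic.h compile its 1- and 2-byte switch cases,
+ * so code such as
+ *
+ *	unsigned char c = 0;
+ *	uatomic_cmpxchg(&c, 0, 1);
+ *
+ * dispatches to __sync_val_compare_and_swap_1(&c, 0, 1).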
+ */
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_GCC_H */

diff --git a/urcu/uatomic/generic.h b/urcu/uatomic/generic.h
new file mode 100644
index 0000000..337fe40
--- /dev/null
+++ b/urcu/uatomic/generic.h
@@ -0,0 +1,557 @@
+#ifndef _URCU_UATOMIC_GENERIC_H
+#define _URCU_UATOMIC_GENERIC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paolo Bonzini
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef uatomic_set
+#define uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))
+#endif
+
+#ifndef uatomic_read
+#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
+#endif
+
+#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
+static inline __attribute__((always_inline))
+void _uatomic_link_error()
+{
+#ifdef ILLEGAL_INSTR
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled.
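+	 *
+	 * In optimized builds the #else branch below declares
+	 * _uatomic_link_error() as an undefined extern instead, so any
+	 * switch case left reachable for an unsupported operand size
+	 * turns into a link failure rather than silent breakage, e.g.
+	 * (hypothetical, on a 32-bit target with no case 8):
+	 *
+	 *	uint64_t v;
+	 *	uatomic_xchg(&v, 1);	(undefined ref: _uatomic_link_error)
+	 *
+	 * In unoptimized builds that call cannot be folded away, hence
+	 * the runtime trap emitted here.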
*/ + __asm__ __volatile__(ILLEGAL_INSTR); +#else + __builtin_trap (); +#endif +} + +#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */ +extern void _uatomic_link_error (); +#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */ + +/* cmpxchg */ + +#ifndef uatomic_cmpxchg +static inline __attribute__((always_inline)) +unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, + unsigned long _new, int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + return __sync_val_compare_and_swap_1(addr, old, _new); +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + return __sync_val_compare_and_swap_2(addr, old, _new); +#endif + case 4: + return __sync_val_compare_and_swap_4(addr, old, _new); +#if (CAA_BITS_PER_LONG == 64) + case 8: + return __sync_val_compare_and_swap_8(addr, old, _new); +#endif + } + _uatomic_link_error(); + return 0; +} + + +#define uatomic_cmpxchg(addr, old, _new) \ + ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\ + (unsigned long)(_new), \ + sizeof(*(addr)))) + + +/* uatomic_and */ + +#ifndef uatomic_and +static inline __attribute__((always_inline)) +void _uatomic_and(void *addr, unsigned long val, + int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + __sync_and_and_fetch_1(addr, val); +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + __sync_and_and_fetch_2(addr, val); +#endif + case 4: + __sync_and_and_fetch_4(addr, val); +#if (CAA_BITS_PER_LONG == 64) + case 8: + __sync_and_and_fetch_8(addr, val); +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_and(addr, v) \ + (_uatomic_and((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) +#endif + +/* uatomic_or */ + +#ifndef uatomic_or +static inline __attribute__((always_inline)) +void _uatomic_or(void *addr, unsigned long val, + int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + __sync_or_and_fetch_1(addr, val); +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + __sync_or_and_fetch_2(addr, val); +#endif + case 4: + __sync_or_and_fetch_4(addr, val); +#if (CAA_BITS_PER_LONG == 64) + case 8: + __sync_or_and_fetch_8(addr, val); +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_or(addr, v) \ + (_uatomic_or((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) +#endif + +/* uatomic_add_return */ + +#ifndef uatomic_add_return +static inline __attribute__((always_inline)) +unsigned long _uatomic_add_return(void *addr, unsigned long val, + int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + return __sync_add_and_fetch_1(addr, val); +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + return __sync_add_and_fetch_2(addr, val); +#endif + case 4: + return __sync_add_and_fetch_4(addr, val); +#if (CAA_BITS_PER_LONG == 64) + case 8: + return __sync_add_and_fetch_8(addr, val); +#endif + } + _uatomic_link_error(); + return 0; +} + + +#define uatomic_add_return(addr, v) \ + ((__typeof__(*(addr))) _uatomic_add_return((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) +#endif /* #ifndef uatomic_add_return */ + +#ifndef uatomic_xchg +/* xchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + { + unsigned char old; + + do { + old = uatomic_read((unsigned char *)addr); + } while (!__sync_bool_compare_and_swap_1(addr, old, val)); + + return old; + } +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + { + unsigned short 
old; + + do { + old = uatomic_read((unsigned short *)addr); + } while (!__sync_bool_compare_and_swap_2(addr, old, val)); + + return old; + } +#endif + case 4: + { + unsigned int old; + + do { + old = uatomic_read((unsigned int *)addr); + } while (!__sync_bool_compare_and_swap_4(addr, old, val)); + + return old; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old; + + do { + old = uatomic_read((unsigned long *)addr); + } while (!__sync_bool_compare_and_swap_8(addr, old, val)); + + return old; + } +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_xchg(addr, v) \ + ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ + sizeof(*(addr)))) +#endif /* #ifndef uatomic_xchg */ + +#else /* #ifndef uatomic_cmpxchg */ + +#ifndef uatomic_and +/* uatomic_and */ + +static inline __attribute__((always_inline)) +void _uatomic_and(void *addr, unsigned long val, int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + { + unsigned char old, oldt; + + oldt = uatomic_read((unsigned char *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old & val, 1); + } while (oldt != old); + } +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + { + unsigned short old, oldt; + + oldt = uatomic_read((unsigned short *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old & val, 2); + } while (oldt != old); + } +#endif + case 4: + { + unsigned int old, oldt; + + oldt = uatomic_read((unsigned int *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old & val, 4); + } while (oldt != old); + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old, oldt; + + oldt = uatomic_read((unsigned long *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old & val, 8); + } while (oldt != old); + } +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_and(addr, v) \ + (uatomic_and((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) +#endif /* #ifndef uatomic_and */ + +#ifndef uatomic_or +/* uatomic_or */ + +static inline __attribute__((always_inline)) +void _uatomic_or(void *addr, unsigned long val, int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + { + unsigned char old, oldt; + + oldt = uatomic_read((unsigned char *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old | val, 1); + } while (oldt != old); + } +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + { + unsigned short old, oldt; + + oldt = uatomic_read((unsigned short *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old | val, 2); + } while (oldt != old); + } +#endif + case 4: + { + unsigned int old, oldt; + + oldt = uatomic_read((unsigned int *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old | val, 4); + } while (oldt != old); + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old, oldt; + + oldt = uatomic_read((unsigned long *)addr); + do { + old = oldt; + oldt = _uatomic_cmpxchg(addr, old, old | val, 8); + } while (oldt != old); + } +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_or(addr, v) \ + (uatomic_or((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) +#endif /* #ifndef uatomic_or */ + +#ifndef uatomic_add_return +/* uatomic_add_return */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_add_return(void *addr, unsigned long val, int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + { + unsigned char old, oldt; + + oldt = uatomic_read((unsigned char *)addr); 
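+
+		/*
+		 * Emulated fetch-and-add: re-read the location, propose
+		 * old + val, and retry the cmpxchg until the word did not
+		 * change between the read and the swap (oldt == old).
+		 */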
+ do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned char *)addr, + old, old + val); + } while (oldt != old); + + return old + val; + } +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + { + unsigned short old, oldt; + + oldt = uatomic_read((unsigned short *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned short *)addr, + old, old + val); + } while (oldt != old); + + return old + val; + } +#endif + case 4: + { + unsigned int old, oldt; + + oldt = uatomic_read((unsigned int *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned int *)addr, + old, old + val); + } while (oldt != old); + + return old + val; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old, oldt; + + oldt = uatomic_read((unsigned long *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned long *)addr, + old, old + val); + } while (oldt != old); + + return old + val; + } +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_add_return(addr, v) \ + ((__typeof__(*(addr))) _uatomic_add_return((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) +#endif /* #ifndef uatomic_add_return */ + +#ifndef uatomic_xchg +/* xchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) +{ + switch (len) { +#ifdef UATOMIC_HAS_ATOMIC_BYTE + case 1: + { + unsigned char old, oldt; + + oldt = uatomic_read((unsigned char *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned char *)addr, + old, val); + } while (oldt != old); + + return old; + } +#endif +#ifdef UATOMIC_HAS_ATOMIC_SHORT + case 2: + { + unsigned short old, oldt; + + oldt = uatomic_read((unsigned short *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned short *)addr, + old, val); + } while (oldt != old); + + return old; + } +#endif + case 4: + { + unsigned int old, oldt; + + oldt = uatomic_read((unsigned int *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned int *)addr, + old, val); + } while (oldt != old); + + return old; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old, oldt; + + oldt = uatomic_read((unsigned long *)addr); + do { + old = oldt; + oldt = uatomic_cmpxchg((unsigned long *)addr, + old, val); + } while (oldt != old); + + return old; + } +#endif + } + _uatomic_link_error(); + return 0; +} + +#define uatomic_xchg(addr, v) \ + ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ + sizeof(*(addr)))) +#endif /* #ifndef uatomic_xchg */ + +#endif /* #else #ifndef uatomic_cmpxchg */ + +/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */ + +#ifndef uatomic_add +#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v)) +#endif + +#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v)) +#define uatomic_sub(addr, v) uatomic_add((addr), -(v)) + +#ifndef uatomic_inc +#define uatomic_inc(addr) uatomic_add((addr), 1) +#endif + +#ifndef uatomic_dec +#define uatomic_dec(addr) uatomic_add((addr), -1) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _URCU_UATOMIC_GENERIC_H */ diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h new file mode 100644 index 0000000..3eb3d63 --- /dev/null +++ b/urcu/uatomic/ppc.h @@ -0,0 +1,219 @@ +#ifndef _URCU_ARCH_UATOMIC_PPC_H +#define _URCU_ARCH_UATOMIC_PPC_H + +/* + * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. + * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. + * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. 
+ * Copyright (c) 2009 Mathieu Desnoyers + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + * + * Code inspired from libuatomic_ops-1.2, inherited in part from the + * Boehm-Demers-Weiser conservative garbage collector. + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __NO_LWSYNC__ +#define LWSYNC_OPCODE "sync\n" +#else +#define LWSYNC_OPCODE "lwsync\n" +#endif + +#define ILLEGAL_INSTR ".long 0xd00d00" + +/* + * Using a isync as second barrier for exchange to provide acquire semantic. + * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly + * explicit that this also has acquire semantics." + * Derived from AO_compare_and_swap(), but removed the comparison. + */ + +/* xchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) +{ + switch (len) { + case 4: + { + unsigned int result; + + __asm__ __volatile__( + LWSYNC_OPCODE + "1:\t" "lwarx %0,0,%1\n" /* load and reserve */ + "stwcx. %2,0,%1\n" /* else store conditional */ + "bne- 1b\n" /* retry if lost reservation */ + "isync\n" + : "=&r"(result) + : "r"(addr), "r"(val) + : "memory", "cc"); + + return result; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long result; + + __asm__ __volatile__( + LWSYNC_OPCODE + "1:\t" "ldarx %0,0,%1\n" /* load and reserve */ + "stdcx. %2,0,%1\n" /* else store conditional */ + "bne- 1b\n" /* retry if lost reservation */ + "isync\n" + : "=&r"(result) + : "r"(addr), "r"(val) + : "memory", "cc"); + + return result; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__(ILLEGAL_INSTR); + return 0; +} + +#define uatomic_xchg(addr, v) \ + ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ + sizeof(*(addr)))) +/* cmpxchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, + unsigned long _new, int len) +{ + switch (len) { + case 4: + { + unsigned int old_val; + + __asm__ __volatile__( + LWSYNC_OPCODE + "1:\t" "lwarx %0,0,%1\n" /* load and reserve */ + "cmpw %0,%3\n" /* if load is not equal to */ + "bne 2f\n" /* old, fail */ + "stwcx. %2,0,%1\n" /* else store conditional */ + "bne- 1b\n" /* retry if lost reservation */ + "isync\n" + "2:\n" + : "=&r"(old_val) + : "r"(addr), "r"((unsigned int)_new), + "r"((unsigned int)old) + : "memory", "cc"); + + return old_val; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old_val; + + __asm__ __volatile__( + LWSYNC_OPCODE + "1:\t" "ldarx %0,0,%1\n" /* load and reserve */ + "cmpd %0,%3\n" /* if load is not equal to */ + "bne 2f\n" /* old, fail */ + "stdcx. %2,0,%1\n" /* else store conditional */ + "bne- 1b\n" /* retry if lost reservation */ + "isync\n" + "2:\n" + : "=&r"(old_val) + : "r"(addr), "r"((unsigned long)_new), + "r"((unsigned long)old) + : "memory", "cc"); + + return old_val; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. 
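+	 *
+	 * (PowerPC note: ILLEGAL_INSTR is the arbitrary word
+	 * ".long 0xd00d00", which decodes to no valid instruction, so an
+	 * unsupported operand size raises SIGILL at the faulting site.)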
*/ + __asm__ __volatile__(ILLEGAL_INSTR); + return 0; +} + + +#define uatomic_cmpxchg(addr, old, _new) \ + ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\ + (unsigned long)(_new), \ + sizeof(*(addr)))) + +/* uatomic_add_return */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_add_return(void *addr, unsigned long val, + int len) +{ + switch (len) { + case 4: + { + unsigned int result; + + __asm__ __volatile__( + LWSYNC_OPCODE + "1:\t" "lwarx %0,0,%1\n" /* load and reserve */ + "add %0,%2,%0\n" /* add val to value loaded */ + "stwcx. %0,0,%1\n" /* store conditional */ + "bne- 1b\n" /* retry if lost reservation */ + "isync\n" + : "=&r"(result) + : "r"(addr), "r"(val) + : "memory", "cc"); + + return result; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long result; + + __asm__ __volatile__( + LWSYNC_OPCODE + "1:\t" "ldarx %0,0,%1\n" /* load and reserve */ + "add %0,%2,%0\n" /* add val to value loaded */ + "stdcx. %0,0,%1\n" /* store conditional */ + "bne- 1b\n" /* retry if lost reservation */ + "isync\n" + : "=&r"(result) + : "r"(addr), "r"(val) + : "memory", "cc"); + + return result; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__(ILLEGAL_INSTR); + return 0; +} + + +#define uatomic_add_return(addr, v) \ + ((__typeof__(*(addr))) _uatomic_add_return((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) + +#ifdef __cplusplus +} +#endif + +#include + +#endif /* _URCU_ARCH_UATOMIC_PPC_H */ diff --git a/urcu/uatomic/s390.h b/urcu/uatomic/s390.h new file mode 100644 index 0000000..b274c1c --- /dev/null +++ b/urcu/uatomic/s390.h @@ -0,0 +1,160 @@ +#ifndef _URCU_UATOMIC_ARCH_S390_H +#define _URCU_UATOMIC_ARCH_S390_H + +/* + * Atomic exchange operations for the S390 architecture. Based on information + * taken from the Principles of Operation Appendix A "Conditional Swapping + * Instructions (CS, CDS)". + * + * Copyright (c) 2009 Novell, Inc. + * Author: Jan Blunck + * Copyright (c) 2009 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) +#define COMPILER_HAVE_SHORT_MEM_OPERAND +#endif + +/* + * MEMOP assembler operand rules: + * - op refer to MEMOP_IN operand + * - MEMOP_IN can expand to more than a single operand. Use it at the end of + * operand list only. 
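+ *
+ * Expansion sketch: with COMPILER_HAVE_SHORT_MEM_OPERAND, the sequence
+ *
+ *	"0:	cs %0,%2," MEMOP_REF(%3) "\n"
+ *
+ * becomes "0: cs %0,%2,%3\n" against a direct "Q" memory operand; in the
+ * fallback it becomes "0: cs %0,%2,0(%3)\n", with %3 bound to the address
+ * register supplied by MEMOP_IN's "a" (addr) operand.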
+ */ + +#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND + +#define MEMOP_OUT(addr) "=Q" (*(addr)) +#define MEMOP_IN(addr) "Q" (*(addr)) +#define MEMOP_REF(op) #op /* op refer to MEMOP_IN operand */ + +#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */ + +#define MEMOP_OUT(addr) "=m" (*(addr)) +#define MEMOP_IN(addr) "a" (addr), "m" (*(addr)) +#define MEMOP_REF(op) "0(" #op ")" /* op refer to MEMOP_IN operand */ + +#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */ + +struct __uatomic_dummy { + unsigned long v[10]; +}; +#define __hp(x) ((struct __uatomic_dummy *)(x)) + +/* xchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len) +{ + switch (len) { + case 4: + { + unsigned int old_val; + + __asm__ __volatile__( + "0: cs %0,%2," MEMOP_REF(%3) "\n" + " brc 4,0b\n" + : "=&r" (old_val), MEMOP_OUT (__hp(addr)) + : "r" (val), MEMOP_IN (__hp(addr)) + : "memory", "cc"); + return old_val; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long old_val; + + __asm__ __volatile__( + "0: csg %0,%2," MEMOP_REF(%3) "\n" + " brc 4,0b\n" + : "=&r" (old_val), MEMOP_OUT (__hp(addr)) + : "r" (val), MEMOP_IN (__hp(addr)) + : "memory", "cc"); + return old_val; + } +#endif + default: + __asm__ __volatile__(".long 0xd00d00"); + } + + return 0; +} + +#define uatomic_xchg(addr, v) \ + (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ + sizeof(*(addr))) + +/* cmpxchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, + unsigned long _new, int len) +{ + switch (len) { + case 4: + { + unsigned int old_val = (unsigned int)old; + + __asm__ __volatile__( + " cs %0,%2," MEMOP_REF(%3) "\n" + : "+r" (old_val), MEMOP_OUT (__hp(addr)) + : "r" (_new), MEMOP_IN (__hp(addr)) + : "memory", "cc"); + return old_val; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__( + " csg %0,%2," MEMOP_REF(%3) "\n" + : "+r" (old), MEMOP_OUT (__hp(addr)) + : "r" (_new), MEMOP_IN (__hp(addr)) + : "memory", "cc"); + return old; + } +#endif + default: + __asm__ __volatile__(".long 0xd00d00"); + } + + return 0; +} + +#define uatomic_cmpxchg(addr, old, _new) \ + (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ + (unsigned long)(old), \ + (unsigned long)(_new), \ + sizeof(*(addr))) + +#ifdef __cplusplus +} +#endif + +#include + +#endif /* _URCU_UATOMIC_ARCH_S390_H */ diff --git a/urcu/uatomic/sparc64.h b/urcu/uatomic/sparc64.h new file mode 100644 index 0000000..d9ecada --- /dev/null +++ b/urcu/uatomic/sparc64.h @@ -0,0 +1,80 @@ +#ifndef _URCU_ARCH_UATOMIC_SPARC64_H +#define _URCU_ARCH_UATOMIC_SPARC64_H + +/* + * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. + * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. + * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. + * Copyright (c) 2009 Mathieu Desnoyers + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + * + * Code inspired from libuatomic_ops-1.2, inherited in part from the + * Boehm-Demers-Weiser conservative garbage collector. 
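+ *
+ * (Note on the cas/casx sequences below: "cas [addr],old,new" compares
+ * the word at addr with register old and, on a match, stores new; the
+ * register passed as new always receives the previous memory value,
+ * which is why _uatomic_cmpxchg returns _new after the asm completes.)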
+ */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* cmpxchg */ + +static inline __attribute__((always_inline)) +unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, + unsigned long _new, int len) +{ + switch (len) { + case 4: + { + __asm__ __volatile__ ( + "membar #StoreLoad | #LoadLoad\n\t" + "cas [%1],%2,%0\n\t" + "membar #StoreLoad | #StoreStore\n\t" + : "+&r" (_new) + : "r" (addr), "r" (old) + : "memory"); + + return _new; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__ ( + "membar #StoreLoad | #LoadLoad\n\t" + "casx [%1],%2,%0\n\t" + "membar #StoreLoad | #StoreStore\n\t" + : "+&r" (_new) + : "r" (addr), "r" (old) + : "memory"); + + return _new; + } +#endif + } + __builtin_trap(); + return 0; +} + + +#define uatomic_cmpxchg(addr, old, _new) \ + ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\ + (unsigned long)(_new), \ + sizeof(*(addr)))) + +#ifdef __cplusplus +} +#endif + +#include + +#endif /* _URCU_ARCH_UATOMIC_PPC_H */ diff --git a/urcu/uatomic/unknown.h b/urcu/uatomic/unknown.h new file mode 100644 index 0000000..6fb4eb3 --- /dev/null +++ b/urcu/uatomic/unknown.h @@ -0,0 +1,25 @@ +#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H +#define _URCU_ARCH_UATOMIC_UNKNOWN_H + +/* + * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. + * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. + * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. + * Copyright (c) 2009 Mathieu Desnoyers + * Copyright (c) 2010 Paul E. McKenney, IBM Corporation + * (Adapted from uatomic_arch_ppc.h) + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ + +/* See configure.ac for the list of recognized architectures. */ +#error "Cannot build: unrecognized architecture detected." + +#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */ diff --git a/urcu/uatomic/x86.h b/urcu/uatomic/x86.h new file mode 100644 index 0000000..b4c108f --- /dev/null +++ b/urcu/uatomic/x86.h @@ -0,0 +1,596 @@ +#ifndef _URCU_ARCH_UATOMIC_X86_H +#define _URCU_ARCH_UATOMIC_X86_H + +/* + * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. + * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. + * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. + * Copyright (c) 2009 Mathieu Desnoyers + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + * + * Code inspired from libuatomic_ops-1.2, inherited in part from the + * Boehm-Demers-Weiser conservative garbage collector. 
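+ *
+ * (Note on the __hp() helper defined below: casting addr to
+ * struct __uatomic_dummy *, a ten-word type, makes the "+m"
+ * constraints cover the whole addressed object, so the compiler
+ * assumes the asm may read and write it and never caches it in a
+ * register across the instruction.)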
+ */ + +#include +#include + +#define UATOMIC_HAS_ATOMIC_BYTE +#define UATOMIC_HAS_ATOMIC_SHORT + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Derived from AO_compare_and_swap() and AO_test_and_set_full(). + */ + +struct __uatomic_dummy { + unsigned long v[10]; +}; +#define __hp(x) ((struct __uatomic_dummy *)(x)) + +#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v)) + +/* cmpxchg */ + +static inline __attribute__((always_inline)) +unsigned long __uatomic_cmpxchg(void *addr, unsigned long old, + unsigned long _new, int len) +{ + switch (len) { + case 1: + { + unsigned char result = old; + + __asm__ __volatile__( + "lock; cmpxchgb %2, %1" + : "+a"(result), "+m"(*__hp(addr)) + : "q"((unsigned char)_new) + : "memory"); + return result; + } + case 2: + { + unsigned short result = old; + + __asm__ __volatile__( + "lock; cmpxchgw %2, %1" + : "+a"(result), "+m"(*__hp(addr)) + : "r"((unsigned short)_new) + : "memory"); + return result; + } + case 4: + { + unsigned int result = old; + + __asm__ __volatile__( + "lock; cmpxchgl %2, %1" + : "+a"(result), "+m"(*__hp(addr)) + : "r"((unsigned int)_new) + : "memory"); + return result; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long result = old; + + __asm__ __volatile__( + "lock; cmpxchgq %2, %1" + : "+a"(result), "+m"(*__hp(addr)) + : "r"((unsigned long)_new) + : "memory"); + return result; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__("ud2"); + return 0; +} + +#define _uatomic_cmpxchg(addr, old, _new) \ + ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\ + (unsigned long)(_new), \ + sizeof(*(addr)))) + +/* xchg */ + +static inline __attribute__((always_inline)) +unsigned long __uatomic_exchange(void *addr, unsigned long val, int len) +{ + /* Note: the "xchg" instruction does not need a "lock" prefix. */ + switch (len) { + case 1: + { + unsigned char result; + __asm__ __volatile__( + "xchgb %0, %1" + : "=q"(result), "+m"(*__hp(addr)) + : "0" ((unsigned char)val) + : "memory"); + return result; + } + case 2: + { + unsigned short result; + __asm__ __volatile__( + "xchgw %0, %1" + : "=r"(result), "+m"(*__hp(addr)) + : "0" ((unsigned short)val) + : "memory"); + return result; + } + case 4: + { + unsigned int result; + __asm__ __volatile__( + "xchgl %0, %1" + : "=r"(result), "+m"(*__hp(addr)) + : "0" ((unsigned int)val) + : "memory"); + return result; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long result; + __asm__ __volatile__( + "xchgq %0, %1" + : "=r"(result), "+m"(*__hp(addr)) + : "0" ((unsigned long)val) + : "memory"); + return result; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. 
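+	 *
+	 * ("ud2" is x86's architecturally reserved undefined-opcode
+	 * instruction: it is guaranteed to raise #UD, so an unsupported
+	 * operand size faults right at this spot in unoptimized builds.)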
*/ + __asm__ __volatile__("ud2"); + return 0; +} + +#define _uatomic_xchg(addr, v) \ + ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \ + sizeof(*(addr)))) + +/* uatomic_add_return */ + +static inline __attribute__((always_inline)) +unsigned long __uatomic_add_return(void *addr, unsigned long val, + int len) +{ + switch (len) { + case 1: + { + unsigned char result = val; + + __asm__ __volatile__( + "lock; xaddb %1, %0" + : "+m"(*__hp(addr)), "+q" (result) + : + : "memory"); + return result + (unsigned char)val; + } + case 2: + { + unsigned short result = val; + + __asm__ __volatile__( + "lock; xaddw %1, %0" + : "+m"(*__hp(addr)), "+r" (result) + : + : "memory"); + return result + (unsigned short)val; + } + case 4: + { + unsigned int result = val; + + __asm__ __volatile__( + "lock; xaddl %1, %0" + : "+m"(*__hp(addr)), "+r" (result) + : + : "memory"); + return result + (unsigned int)val; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + unsigned long result = val; + + __asm__ __volatile__( + "lock; xaddq %1, %0" + : "+m"(*__hp(addr)), "+r" (result) + : + : "memory"); + return result + (unsigned long)val; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__("ud2"); + return 0; +} + +#define _uatomic_add_return(addr, v) \ + ((__typeof__(*(addr))) __uatomic_add_return((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) + +/* uatomic_and */ + +static inline __attribute__((always_inline)) +void __uatomic_and(void *addr, unsigned long val, int len) +{ + switch (len) { + case 1: + { + __asm__ __volatile__( + "lock; andb %1, %0" + : "=m"(*__hp(addr)) + : "iq" ((unsigned char)val) + : "memory"); + return; + } + case 2: + { + __asm__ __volatile__( + "lock; andw %1, %0" + : "=m"(*__hp(addr)) + : "ir" ((unsigned short)val) + : "memory"); + return; + } + case 4: + { + __asm__ __volatile__( + "lock; andl %1, %0" + : "=m"(*__hp(addr)) + : "ir" ((unsigned int)val) + : "memory"); + return; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__( + "lock; andq %1, %0" + : "=m"(*__hp(addr)) + : "er" ((unsigned long)val) + : "memory"); + return; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__("ud2"); + return; +} + +#define _uatomic_and(addr, v) \ + (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr)))) + +/* uatomic_or */ + +static inline __attribute__((always_inline)) +void __uatomic_or(void *addr, unsigned long val, int len) +{ + switch (len) { + case 1: + { + __asm__ __volatile__( + "lock; orb %1, %0" + : "=m"(*__hp(addr)) + : "iq" ((unsigned char)val) + : "memory"); + return; + } + case 2: + { + __asm__ __volatile__( + "lock; orw %1, %0" + : "=m"(*__hp(addr)) + : "ir" ((unsigned short)val) + : "memory"); + return; + } + case 4: + { + __asm__ __volatile__( + "lock; orl %1, %0" + : "=m"(*__hp(addr)) + : "ir" ((unsigned int)val) + : "memory"); + return; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__( + "lock; orq %1, %0" + : "=m"(*__hp(addr)) + : "er" ((unsigned long)val) + : "memory"); + return; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. 
*/ + __asm__ __volatile__("ud2"); + return; +} + +#define _uatomic_or(addr, v) \ + (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr)))) + +/* uatomic_add */ + +static inline __attribute__((always_inline)) +void __uatomic_add(void *addr, unsigned long val, int len) +{ + switch (len) { + case 1: + { + __asm__ __volatile__( + "lock; addb %1, %0" + : "=m"(*__hp(addr)) + : "iq" ((unsigned char)val) + : "memory"); + return; + } + case 2: + { + __asm__ __volatile__( + "lock; addw %1, %0" + : "=m"(*__hp(addr)) + : "ir" ((unsigned short)val) + : "memory"); + return; + } + case 4: + { + __asm__ __volatile__( + "lock; addl %1, %0" + : "=m"(*__hp(addr)) + : "ir" ((unsigned int)val) + : "memory"); + return; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__( + "lock; addq %1, %0" + : "=m"(*__hp(addr)) + : "er" ((unsigned long)val) + : "memory"); + return; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__("ud2"); + return; +} + +#define _uatomic_add(addr, v) \ + (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr)))) + + +/* uatomic_inc */ + +static inline __attribute__((always_inline)) +void __uatomic_inc(void *addr, int len) +{ + switch (len) { + case 1: + { + __asm__ __volatile__( + "lock; incb %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } + case 2: + { + __asm__ __volatile__( + "lock; incw %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } + case 4: + { + __asm__ __volatile__( + "lock; incl %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__( + "lock; incq %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__("ud2"); + return; +} + +#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr)))) + +/* uatomic_dec */ + +static inline __attribute__((always_inline)) +void __uatomic_dec(void *addr, int len) +{ + switch (len) { + case 1: + { + __asm__ __volatile__( + "lock; decb %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } + case 2: + { + __asm__ __volatile__( + "lock; decw %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } + case 4: + { + __asm__ __volatile__( + "lock; decl %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } +#if (CAA_BITS_PER_LONG == 64) + case 8: + { + __asm__ __volatile__( + "lock; decq %0" + : "=m"(*__hp(addr)) + : + : "memory"); + return; + } +#endif + } + /* generate an illegal instruction. Cannot catch this with linker tricks + * when optimizations are disabled. */ + __asm__ __volatile__("ud2"); + return; +} + +#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr)))) + +#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH)) +extern int __rcu_cas_avail; +extern int __rcu_cas_init(void); + +#define UATOMIC_COMPAT(insn) \ + ((likely(__rcu_cas_avail > 0)) \ + ? (_uatomic_##insn) \ + : ((unlikely(__rcu_cas_avail < 0) \ + ? ((__rcu_cas_init() > 0) \ + ? 
(_uatomic_##insn) \ + : (compat_uatomic_##insn)) \ + : (compat_uatomic_##insn)))) + +extern unsigned long _compat_uatomic_set(void *addr, + unsigned long _new, int len); +#define compat_uatomic_set(addr, _new) \ + ((__typeof__(*(addr))) _compat_uatomic_set((addr), \ + (unsigned long)(_new), \ + sizeof(*(addr)))) + + +extern unsigned long _compat_uatomic_xchg(void *addr, + unsigned long _new, int len); +#define compat_uatomic_xchg(addr, _new) \ + ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \ + (unsigned long)(_new), \ + sizeof(*(addr)))) + +extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old, + unsigned long _new, int len); +#define compat_uatomic_cmpxchg(addr, old, _new) \ + ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \ + (unsigned long)(old), \ + (unsigned long)(_new), \ + sizeof(*(addr)))) + +extern unsigned long _compat_uatomic_and(void *addr, + unsigned long _new, int len); +#define compat_uatomic_and(addr, v) \ + ((__typeof__(*(addr))) _compat_uatomic_and((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) + +extern unsigned long _compat_uatomic_or(void *addr, + unsigned long _new, int len); +#define compat_uatomic_or(addr, v) \ + ((__typeof__(*(addr))) _compat_uatomic_or((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) + +extern unsigned long _compat_uatomic_add_return(void *addr, + unsigned long _new, int len); +#define compat_uatomic_add_return(addr, v) \ + ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \ + (unsigned long)(v), \ + sizeof(*(addr)))) + +#define compat_uatomic_add(addr, v) \ + ((void)compat_uatomic_add_return((addr), (v))) +#define compat_uatomic_inc(addr) \ + (compat_uatomic_add((addr), 1)) +#define compat_uatomic_dec(addr) \ + (compat_uatomic_add((addr), -1)) + +#else +#define UATOMIC_COMPAT(insn) (_uatomic_##insn) +#endif + +/* Read is atomic even in compat mode */ +#define uatomic_set(addr, v) \ + UATOMIC_COMPAT(set(addr, v)) + +#define uatomic_cmpxchg(addr, old, _new) \ + UATOMIC_COMPAT(cmpxchg(addr, old, _new)) +#define uatomic_xchg(addr, v) \ + UATOMIC_COMPAT(xchg(addr, v)) +#define uatomic_and(addr, v) \ + UATOMIC_COMPAT(and(addr, v)) +#define uatomic_or(addr, v) \ + UATOMIC_COMPAT(or(addr, v)) +#define uatomic_add_return(addr, v) \ + UATOMIC_COMPAT(add_return(addr, v)) + +#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v)) +#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr)) +#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr)) + +#ifdef __cplusplus +} +#endif + +#include + +#endif /* _URCU_ARCH_UATOMIC_X86_H */ diff --git a/urcu/uatomic_arch_alpha.h b/urcu/uatomic_arch_alpha.h deleted file mode 100644 index 0f795e8..0000000 --- a/urcu/uatomic_arch_alpha.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef _URCU_UATOMIC_ARCH_ALPHA_H -#define _URCU_UATOMIC_ARCH_ALPHA_H - -/* - * Atomic exchange operations for the Alpha architecture. Let GCC do it. - * - * Copyright (c) 2010 Paolo Bonzini - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#include -#include -#include - -#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */ diff --git a/urcu/uatomic_arch_arm.h b/urcu/uatomic_arch_arm.h deleted file mode 100644 index fee3040..0000000 --- a/urcu/uatomic_arch_arm.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _URCU_ARCH_UATOMIC_ARM_H -#define _URCU_ARCH_UATOMIC_ARM_H - -/* - * Atomics for ARM. This approach is usable on kernels back to 2.6.15. - * - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. - * Copyright (c) 2009 Mathieu Desnoyers - * Copyright (c) 2010 Paul E. McKenney, IBM Corporation - * (Adapted from uatomic_arch_ppc.h) - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - * - * Code inspired from libuatomic_ops-1.2, inherited in part from the - * Boehm-Demers-Weiser conservative garbage collector. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* xchg */ -#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v) - -#ifdef __cplusplus -} -#endif - -#include - -#endif /* _URCU_ARCH_UATOMIC_ARM_H */ diff --git a/urcu/uatomic_arch_gcc.h b/urcu/uatomic_arch_gcc.h deleted file mode 100644 index 4aa32fd..0000000 --- a/urcu/uatomic_arch_gcc.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef _URCU_ARCH_UATOMIC_GCC_H -#define _URCU_ARCH_UATOMIC_GCC_H - -/* - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. - * Copyright (c) 2009 Mathieu Desnoyers - * Copyright (c) 2010 Paul E. McKenney, IBM Corporation - * (Adapted from uatomic_arch_ppc.h) - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - * - * Code inspired from libuatomic_ops-1.2, inherited in part from the - * Boehm-Demers-Weiser conservative garbage collector. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * If your platform doesn't have a full set of atomics, you will need - * a separate uatomic_arch_*.h file for your architecture. 
Otherwise, - * just rely on the definitions in uatomic_generic.h. - */ -#define UATOMIC_HAS_ATOMIC_BYTE -#define UATOMIC_HAS_ATOMIC_SHORT - -#ifdef __cplusplus -} -#endif - -#include - -#endif /* _URCU_ARCH_UATOMIC_GCC_H */ diff --git a/urcu/uatomic_arch_ppc.h b/urcu/uatomic_arch_ppc.h deleted file mode 100644 index bb74934..0000000 --- a/urcu/uatomic_arch_ppc.h +++ /dev/null @@ -1,219 +0,0 @@ -#ifndef _URCU_ARCH_UATOMIC_PPC_H -#define _URCU_ARCH_UATOMIC_PPC_H - -/* - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. - * Copyright (c) 2009 Mathieu Desnoyers - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - * - * Code inspired from libuatomic_ops-1.2, inherited in part from the - * Boehm-Demers-Weiser conservative garbage collector. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef __NO_LWSYNC__ -#define LWSYNC_OPCODE "sync\n" -#else -#define LWSYNC_OPCODE "lwsync\n" -#endif - -#define ILLEGAL_INSTR ".long 0xd00d00" - -/* - * Using a isync as second barrier for exchange to provide acquire semantic. - * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly - * explicit that this also has acquire semantics." - * Derived from AO_compare_and_swap(), but removed the comparison. - */ - -/* xchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) -{ - switch (len) { - case 4: - { - unsigned int result; - - __asm__ __volatile__( - LWSYNC_OPCODE - "1:\t" "lwarx %0,0,%1\n" /* load and reserve */ - "stwcx. %2,0,%1\n" /* else store conditional */ - "bne- 1b\n" /* retry if lost reservation */ - "isync\n" - : "=&r"(result) - : "r"(addr), "r"(val) - : "memory", "cc"); - - return result; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long result; - - __asm__ __volatile__( - LWSYNC_OPCODE - "1:\t" "ldarx %0,0,%1\n" /* load and reserve */ - "stdcx. %2,0,%1\n" /* else store conditional */ - "bne- 1b\n" /* retry if lost reservation */ - "isync\n" - : "=&r"(result) - : "r"(addr), "r"(val) - : "memory", "cc"); - - return result; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__(ILLEGAL_INSTR); - return 0; -} - -#define uatomic_xchg(addr, v) \ - ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ - sizeof(*(addr)))) -/* cmpxchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, - unsigned long _new, int len) -{ - switch (len) { - case 4: - { - unsigned int old_val; - - __asm__ __volatile__( - LWSYNC_OPCODE - "1:\t" "lwarx %0,0,%1\n" /* load and reserve */ - "cmpw %0,%3\n" /* if load is not equal to */ - "bne 2f\n" /* old, fail */ - "stwcx. 
%2,0,%1\n" /* else store conditional */ - "bne- 1b\n" /* retry if lost reservation */ - "isync\n" - "2:\n" - : "=&r"(old_val) - : "r"(addr), "r"((unsigned int)_new), - "r"((unsigned int)old) - : "memory", "cc"); - - return old_val; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old_val; - - __asm__ __volatile__( - LWSYNC_OPCODE - "1:\t" "ldarx %0,0,%1\n" /* load and reserve */ - "cmpd %0,%3\n" /* if load is not equal to */ - "bne 2f\n" /* old, fail */ - "stdcx. %2,0,%1\n" /* else store conditional */ - "bne- 1b\n" /* retry if lost reservation */ - "isync\n" - "2:\n" - : "=&r"(old_val) - : "r"(addr), "r"((unsigned long)_new), - "r"((unsigned long)old) - : "memory", "cc"); - - return old_val; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__(ILLEGAL_INSTR); - return 0; -} - - -#define uatomic_cmpxchg(addr, old, _new) \ - ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\ - (unsigned long)(_new), \ - sizeof(*(addr)))) - -/* uatomic_add_return */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_add_return(void *addr, unsigned long val, - int len) -{ - switch (len) { - case 4: - { - unsigned int result; - - __asm__ __volatile__( - LWSYNC_OPCODE - "1:\t" "lwarx %0,0,%1\n" /* load and reserve */ - "add %0,%2,%0\n" /* add val to value loaded */ - "stwcx. %0,0,%1\n" /* store conditional */ - "bne- 1b\n" /* retry if lost reservation */ - "isync\n" - : "=&r"(result) - : "r"(addr), "r"(val) - : "memory", "cc"); - - return result; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long result; - - __asm__ __volatile__( - LWSYNC_OPCODE - "1:\t" "ldarx %0,0,%1\n" /* load and reserve */ - "add %0,%2,%0\n" /* add val to value loaded */ - "stdcx. %0,0,%1\n" /* store conditional */ - "bne- 1b\n" /* retry if lost reservation */ - "isync\n" - : "=&r"(result) - : "r"(addr), "r"(val) - : "memory", "cc"); - - return result; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__(ILLEGAL_INSTR); - return 0; -} - - -#define uatomic_add_return(addr, v) \ - ((__typeof__(*(addr))) _uatomic_add_return((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) - -#ifdef __cplusplus -} -#endif - -#include - -#endif /* _URCU_ARCH_UATOMIC_PPC_H */ diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h deleted file mode 100644 index 2a4fa03..0000000 --- a/urcu/uatomic_arch_s390.h +++ /dev/null @@ -1,160 +0,0 @@ -#ifndef _URCU_UATOMIC_ARCH_S390_H -#define _URCU_UATOMIC_ARCH_S390_H - -/* - * Atomic exchange operations for the S390 architecture. Based on information - * taken from the Principles of Operation Appendix A "Conditional Swapping - * Instructions (CS, CDS)". - * - * Copyright (c) 2009 Novell, Inc. 
- * Author: Jan Blunck - * Copyright (c) 2009 Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) -#define COMPILER_HAVE_SHORT_MEM_OPERAND -#endif - -/* - * MEMOP assembler operand rules: - * - op refers to the MEMOP_IN operand - * - MEMOP_IN can expand to more than a single operand. Use it at the end of - * the operand list only. - */ - -#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND - -#define MEMOP_OUT(addr) "=Q" (*(addr)) -#define MEMOP_IN(addr) "Q" (*(addr)) -#define MEMOP_REF(op) #op /* op refers to the MEMOP_IN operand */ - -#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */ - -#define MEMOP_OUT(addr) "=m" (*(addr)) -#define MEMOP_IN(addr) "a" (addr), "m" (*(addr)) -#define MEMOP_REF(op) "0(" #op ")" /* op refers to the MEMOP_IN operand */ - -#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */ - -struct __uatomic_dummy { - unsigned long v[10]; -}; -#define __hp(x) ((struct __uatomic_dummy *)(x)) - -/* xchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len) -{ - switch (len) { - case 4: - { - unsigned int old_val; - - __asm__ __volatile__( - "0: cs %0,%2," MEMOP_REF(%3) "\n" - " brc 4,0b\n" - : "=&r" (old_val), MEMOP_OUT (__hp(addr)) - : "r" (val), MEMOP_IN (__hp(addr)) - : "memory", "cc"); - return old_val; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old_val; - - __asm__ __volatile__( - "0: csg %0,%2," MEMOP_REF(%3) "\n" - " brc 4,0b\n" - : "=&r" (old_val), MEMOP_OUT (__hp(addr)) - : "r" (val), MEMOP_IN (__hp(addr)) - : "memory", "cc"); - return old_val; - } -#endif - default: - __asm__ __volatile__(".long 0xd00d00"); - } - - return 0; -} - -#define uatomic_xchg(addr, v) \ - (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ - sizeof(*(addr))) - -/* cmpxchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, - unsigned long _new, int len) -{ - switch (len) { - case 4: - { - unsigned int old_val = (unsigned int)old; - - __asm__ __volatile__( - " cs %0,%2," MEMOP_REF(%3) "\n" - : "+r" (old_val), MEMOP_OUT (__hp(addr)) - : "r" (_new), MEMOP_IN (__hp(addr)) - : "memory", "cc"); - return old_val; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__( - " csg %0,%2," MEMOP_REF(%3) "\n" - : "+r" (old), MEMOP_OUT (__hp(addr)) - : "r" (_new), MEMOP_IN (__hp(addr)) 
- : "memory", "cc"); - return old; - } -#endif - default: - __asm__ __volatile__(".long 0xd00d00"); - } - - return 0; -} - -#define uatomic_cmpxchg(addr, old, _new) \ - (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ - (unsigned long)(old), \ - (unsigned long)(_new), \ - sizeof(*(addr))) - -#ifdef __cplusplus -} -#endif - -#include - -#endif /* _URCU_UATOMIC_ARCH_S390_H */ diff --git a/urcu/uatomic_arch_sparc64.h b/urcu/uatomic_arch_sparc64.h deleted file mode 100644 index 082c847..0000000 --- a/urcu/uatomic_arch_sparc64.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef _URCU_ARCH_UATOMIC_SPARC64_H -#define _URCU_ARCH_UATOMIC_SPARC64_H - -/* - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved. - * Copyright (c) 2009 Mathieu Desnoyers - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - * - * Code inspired from libuatomic_ops-1.2, inherited in part from the - * Boehm-Demers-Weiser conservative garbage collector. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* cmpxchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, - unsigned long _new, int len) -{ - switch (len) { - case 4: - { - __asm__ __volatile__ ( - "membar #StoreLoad | #LoadLoad\n\t" - "cas [%1],%2,%0\n\t" - "membar #StoreLoad | #StoreStore\n\t" - : "+&r" (_new) - : "r" (addr), "r" (old) - : "memory"); - - return _new; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__ ( - "membar #StoreLoad | #LoadLoad\n\t" - "casx [%1],%2,%0\n\t" - "membar #StoreLoad | #StoreStore\n\t" - : "+&r" (_new) - : "r" (addr), "r" (old) - : "memory"); - - return _new; - } -#endif - } - __builtin_trap(); - return 0; -} - - -#define uatomic_cmpxchg(addr, old, _new) \ - ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\ - (unsigned long)(_new), \ - sizeof(*(addr)))) - -#ifdef __cplusplus -} -#endif - -#include - -#endif /* _URCU_ARCH_UATOMIC_PPC_H */ diff --git a/urcu/uatomic_arch_unknown.h b/urcu/uatomic_arch_unknown.h deleted file mode 100644 index 6fb4eb3..0000000 --- a/urcu/uatomic_arch_unknown.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H -#define _URCU_ARCH_UATOMIC_UNKNOWN_H - -/* - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. - * Copyright (c) 2009 Mathieu Desnoyers - * Copyright (c) 2010 Paul E. McKenney, IBM Corporation - * (Adapted from uatomic_arch_ppc.h) - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. 
- * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - */ - -/* See configure.ac for the list of recognized architectures. */ -#error "Cannot build: unrecognized architecture detected." - -#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */ diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h deleted file mode 100644 index 9fedee6..0000000 --- a/urcu/uatomic_arch_x86.h +++ /dev/null @@ -1,596 +0,0 @@ -#ifndef _URCU_ARCH_UATOMIC_X86_H -#define _URCU_ARCH_UATOMIC_X86_H - -/* - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. - * Copyright (c) 2009 Mathieu Desnoyers - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - * - * Code inspired from libuatomic_ops-1.2, inherited in part from the - * Boehm-Demers-Weiser conservative garbage collector. - */ - -#include -#include - -#define UATOMIC_HAS_ATOMIC_BYTE -#define UATOMIC_HAS_ATOMIC_SHORT - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Derived from AO_compare_and_swap() and AO_test_and_set_full(). - */ - -struct __uatomic_dummy { - unsigned long v[10]; -}; -#define __hp(x) ((struct __uatomic_dummy *)(x)) - -#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v)) - -/* cmpxchg */ - -static inline __attribute__((always_inline)) -unsigned long __uatomic_cmpxchg(void *addr, unsigned long old, - unsigned long _new, int len) -{ - switch (len) { - case 1: - { - unsigned char result = old; - - __asm__ __volatile__( - "lock; cmpxchgb %2, %1" - : "+a"(result), "+m"(*__hp(addr)) - : "q"((unsigned char)_new) - : "memory"); - return result; - } - case 2: - { - unsigned short result = old; - - __asm__ __volatile__( - "lock; cmpxchgw %2, %1" - : "+a"(result), "+m"(*__hp(addr)) - : "r"((unsigned short)_new) - : "memory"); - return result; - } - case 4: - { - unsigned int result = old; - - __asm__ __volatile__( - "lock; cmpxchgl %2, %1" - : "+a"(result), "+m"(*__hp(addr)) - : "r"((unsigned int)_new) - : "memory"); - return result; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long result = old; - - __asm__ __volatile__( - "lock; cmpxchgq %2, %1" - : "+a"(result), "+m"(*__hp(addr)) - : "r"((unsigned long)_new) - : "memory"); - return result; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__("ud2"); - return 0; -} - -#define _uatomic_cmpxchg(addr, old, _new) \ - ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\ - (unsigned long)(_new), \ - sizeof(*(addr)))) - -/* xchg */ - -static inline __attribute__((always_inline)) -unsigned long __uatomic_exchange(void *addr, unsigned long val, int len) -{ - /* Note: the "xchg" instruction does not need a "lock" prefix. 
*/ - switch (len) { - case 1: - { - unsigned char result; - __asm__ __volatile__( - "xchgb %0, %1" - : "=q"(result), "+m"(*__hp(addr)) - : "0" ((unsigned char)val) - : "memory"); - return result; - } - case 2: - { - unsigned short result; - __asm__ __volatile__( - "xchgw %0, %1" - : "=r"(result), "+m"(*__hp(addr)) - : "0" ((unsigned short)val) - : "memory"); - return result; - } - case 4: - { - unsigned int result; - __asm__ __volatile__( - "xchgl %0, %1" - : "=r"(result), "+m"(*__hp(addr)) - : "0" ((unsigned int)val) - : "memory"); - return result; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long result; - __asm__ __volatile__( - "xchgq %0, %1" - : "=r"(result), "+m"(*__hp(addr)) - : "0" ((unsigned long)val) - : "memory"); - return result; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__("ud2"); - return 0; -} - -#define _uatomic_xchg(addr, v) \ - ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \ - sizeof(*(addr)))) - -/* uatomic_add_return */ - -static inline __attribute__((always_inline)) -unsigned long __uatomic_add_return(void *addr, unsigned long val, - int len) -{ - switch (len) { - case 1: - { - unsigned char result = val; - - __asm__ __volatile__( - "lock; xaddb %1, %0" - : "+m"(*__hp(addr)), "+q" (result) - : - : "memory"); - return result + (unsigned char)val; - } - case 2: - { - unsigned short result = val; - - __asm__ __volatile__( - "lock; xaddw %1, %0" - : "+m"(*__hp(addr)), "+r" (result) - : - : "memory"); - return result + (unsigned short)val; - } - case 4: - { - unsigned int result = val; - - __asm__ __volatile__( - "lock; xaddl %1, %0" - : "+m"(*__hp(addr)), "+r" (result) - : - : "memory"); - return result + (unsigned int)val; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long result = val; - - __asm__ __volatile__( - "lock; xaddq %1, %0" - : "+m"(*__hp(addr)), "+r" (result) - : - : "memory"); - return result + (unsigned long)val; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__("ud2"); - return 0; -} - -#define _uatomic_add_return(addr, v) \ - ((__typeof__(*(addr))) __uatomic_add_return((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) - -/* uatomic_and */ - -static inline __attribute__((always_inline)) -void __uatomic_and(void *addr, unsigned long val, int len) -{ - switch (len) { - case 1: - { - __asm__ __volatile__( - "lock; andb %1, %0" - : "=m"(*__hp(addr)) - : "iq" ((unsigned char)val) - : "memory"); - return; - } - case 2: - { - __asm__ __volatile__( - "lock; andw %1, %0" - : "=m"(*__hp(addr)) - : "ir" ((unsigned short)val) - : "memory"); - return; - } - case 4: - { - __asm__ __volatile__( - "lock; andl %1, %0" - : "=m"(*__hp(addr)) - : "ir" ((unsigned int)val) - : "memory"); - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__( - "lock; andq %1, %0" - : "=m"(*__hp(addr)) - : "er" ((unsigned long)val) - : "memory"); - return; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. 
*/ - __asm__ __volatile__("ud2"); - return; -} - -#define _uatomic_and(addr, v) \ - (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr)))) - -/* uatomic_or */ - -static inline __attribute__((always_inline)) -void __uatomic_or(void *addr, unsigned long val, int len) -{ - switch (len) { - case 1: - { - __asm__ __volatile__( - "lock; orb %1, %0" - : "=m"(*__hp(addr)) - : "iq" ((unsigned char)val) - : "memory"); - return; - } - case 2: - { - __asm__ __volatile__( - "lock; orw %1, %0" - : "=m"(*__hp(addr)) - : "ir" ((unsigned short)val) - : "memory"); - return; - } - case 4: - { - __asm__ __volatile__( - "lock; orl %1, %0" - : "=m"(*__hp(addr)) - : "ir" ((unsigned int)val) - : "memory"); - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__( - "lock; orq %1, %0" - : "=m"(*__hp(addr)) - : "er" ((unsigned long)val) - : "memory"); - return; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__("ud2"); - return; -} - -#define _uatomic_or(addr, v) \ - (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr)))) - -/* uatomic_add */ - -static inline __attribute__((always_inline)) -void __uatomic_add(void *addr, unsigned long val, int len) -{ - switch (len) { - case 1: - { - __asm__ __volatile__( - "lock; addb %1, %0" - : "=m"(*__hp(addr)) - : "iq" ((unsigned char)val) - : "memory"); - return; - } - case 2: - { - __asm__ __volatile__( - "lock; addw %1, %0" - : "=m"(*__hp(addr)) - : "ir" ((unsigned short)val) - : "memory"); - return; - } - case 4: - { - __asm__ __volatile__( - "lock; addl %1, %0" - : "=m"(*__hp(addr)) - : "ir" ((unsigned int)val) - : "memory"); - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__( - "lock; addq %1, %0" - : "=m"(*__hp(addr)) - : "er" ((unsigned long)val) - : "memory"); - return; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__("ud2"); - return; -} - -#define _uatomic_add(addr, v) \ - (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr)))) - - -/* uatomic_inc */ - -static inline __attribute__((always_inline)) -void __uatomic_inc(void *addr, int len) -{ - switch (len) { - case 1: - { - __asm__ __volatile__( - "lock; incb %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } - case 2: - { - __asm__ __volatile__( - "lock; incw %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } - case 4: - { - __asm__ __volatile__( - "lock; incl %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__( - "lock; incq %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. 
*/ - __asm__ __volatile__("ud2"); - return; -} - -#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr)))) - -/* uatomic_dec */ - -static inline __attribute__((always_inline)) -void __uatomic_dec(void *addr, int len) -{ - switch (len) { - case 1: - { - __asm__ __volatile__( - "lock; decb %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } - case 2: - { - __asm__ __volatile__( - "lock; decw %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } - case 4: - { - __asm__ __volatile__( - "lock; decl %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - __asm__ __volatile__( - "lock; decq %0" - : "=m"(*__hp(addr)) - : - : "memory"); - return; - } -#endif - } - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. */ - __asm__ __volatile__("ud2"); - return; -} - -#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr)))) - -#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH)) -extern int __rcu_cas_avail; -extern int __rcu_cas_init(void); - -#define UATOMIC_COMPAT(insn) \ - ((likely(__rcu_cas_avail > 0)) \ - ? (_uatomic_##insn) \ - : ((unlikely(__rcu_cas_avail < 0) \ - ? ((__rcu_cas_init() > 0) \ - ? (_uatomic_##insn) \ - : (compat_uatomic_##insn)) \ - : (compat_uatomic_##insn)))) - -extern unsigned long _compat_uatomic_set(void *addr, - unsigned long _new, int len); -#define compat_uatomic_set(addr, _new) \ - ((__typeof__(*(addr))) _compat_uatomic_set((addr), \ - (unsigned long)(_new), \ - sizeof(*(addr)))) - - -extern unsigned long _compat_uatomic_xchg(void *addr, - unsigned long _new, int len); -#define compat_uatomic_xchg(addr, _new) \ - ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \ - (unsigned long)(_new), \ - sizeof(*(addr)))) - -extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old, - unsigned long _new, int len); -#define compat_uatomic_cmpxchg(addr, old, _new) \ - ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \ - (unsigned long)(old), \ - (unsigned long)(_new), \ - sizeof(*(addr)))) - -extern unsigned long _compat_uatomic_and(void *addr, - unsigned long _new, int len); -#define compat_uatomic_and(addr, v) \ - ((__typeof__(*(addr))) _compat_uatomic_and((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) - -extern unsigned long _compat_uatomic_or(void *addr, - unsigned long _new, int len); -#define compat_uatomic_or(addr, v) \ - ((__typeof__(*(addr))) _compat_uatomic_or((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) - -extern unsigned long _compat_uatomic_add_return(void *addr, - unsigned long _new, int len); -#define compat_uatomic_add_return(addr, v) \ - ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) - -#define compat_uatomic_add(addr, v) \ - ((void)compat_uatomic_add_return((addr), (v))) -#define compat_uatomic_inc(addr) \ - (compat_uatomic_add((addr), 1)) -#define compat_uatomic_dec(addr) \ - (compat_uatomic_add((addr), -1)) - -#else -#define UATOMIC_COMPAT(insn) (_uatomic_##insn) -#endif - -/* Read is atomic even in compat mode */ -#define uatomic_set(addr, v) \ - UATOMIC_COMPAT(set(addr, v)) - -#define uatomic_cmpxchg(addr, old, _new) \ - UATOMIC_COMPAT(cmpxchg(addr, old, _new)) -#define uatomic_xchg(addr, v) \ - UATOMIC_COMPAT(xchg(addr, v)) -#define uatomic_and(addr, v) \ - UATOMIC_COMPAT(and(addr, v)) -#define uatomic_or(addr, v) \ - UATOMIC_COMPAT(or(addr, v)) -#define uatomic_add_return(addr, v) \ - 
UATOMIC_COMPAT(add_return(addr, v)) - -#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v)) -#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr)) -#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr)) - -#ifdef __cplusplus -} -#endif - -#include - -#endif /* _URCU_ARCH_UATOMIC_X86_H */ diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h deleted file mode 100644 index 337fe40..0000000 --- a/urcu/uatomic_generic.h +++ /dev/null @@ -1,557 +0,0 @@ -#ifndef _URCU_UATOMIC_GENERIC_H -#define _URCU_UATOMIC_GENERIC_H - -/* - * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. - * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. - * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. - * Copyright (c) 2009 Mathieu Desnoyers - * Copyright (c) 2010 Paolo Bonzini - * - * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED - * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. - * - * Permission is hereby granted to use or copy this program - * for any purpose, provided the above notices are retained on all copies. - * Permission to modify the code and to distribute modified code is granted, - * provided the above notices are retained, and a notice that the code was - * modified is included with the above copyright notice. - * - * Code inspired from libuatomic_ops-1.2, inherited in part from the - * Boehm-Demers-Weiser conservative garbage collector. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef uatomic_set -#define uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v)) -#endif - -#ifndef uatomic_read -#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr)) -#endif - -#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR -static inline __attribute__((always_inline)) -void _uatomic_link_error() -{ -#ifdef ILLEGAL_INSTR - /* generate an illegal instruction. Cannot catch this with linker tricks - * when optimizations are disabled. 
*/ - __asm__ __volatile__(ILLEGAL_INSTR); -#else - __builtin_trap (); -#endif -} - -#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */ -extern void _uatomic_link_error (); -#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */ - -/* cmpxchg */ - -#ifndef uatomic_cmpxchg -static inline __attribute__((always_inline)) -unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, - unsigned long _new, int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - return __sync_val_compare_and_swap_1(addr, old, _new); -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - return __sync_val_compare_and_swap_2(addr, old, _new); -#endif - case 4: - return __sync_val_compare_and_swap_4(addr, old, _new); -#if (CAA_BITS_PER_LONG == 64) - case 8: - return __sync_val_compare_and_swap_8(addr, old, _new); -#endif - } - _uatomic_link_error(); - return 0; -} - - -#define uatomic_cmpxchg(addr, old, _new) \ - ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\ - (unsigned long)(_new), \ - sizeof(*(addr)))) - - -/* uatomic_and */ - -#ifndef uatomic_and -static inline __attribute__((always_inline)) -void _uatomic_and(void *addr, unsigned long val, - int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - __sync_and_and_fetch_1(addr, val); - return; -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - __sync_and_and_fetch_2(addr, val); - return; -#endif - case 4: - __sync_and_and_fetch_4(addr, val); - return; -#if (CAA_BITS_PER_LONG == 64) - case 8: - __sync_and_and_fetch_8(addr, val); - return; -#endif - } - _uatomic_link_error(); -} - -#define uatomic_and(addr, v) \ - (_uatomic_and((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) -#endif - -/* uatomic_or */ - -#ifndef uatomic_or -static inline __attribute__((always_inline)) -void _uatomic_or(void *addr, unsigned long val, - int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - __sync_or_and_fetch_1(addr, val); - return; -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - __sync_or_and_fetch_2(addr, val); - return; -#endif - case 4: - __sync_or_and_fetch_4(addr, val); - return; -#if (CAA_BITS_PER_LONG == 64) - case 8: - __sync_or_and_fetch_8(addr, val); - return; -#endif - } - _uatomic_link_error(); -} - -#define uatomic_or(addr, v) \ - (_uatomic_or((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) -#endif - -/* uatomic_add_return */ - -#ifndef uatomic_add_return -static inline __attribute__((always_inline)) -unsigned long _uatomic_add_return(void *addr, unsigned long val, - int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - return __sync_add_and_fetch_1(addr, val); -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - return __sync_add_and_fetch_2(addr, val); -#endif - case 4: - return __sync_add_and_fetch_4(addr, val); -#if (CAA_BITS_PER_LONG == 64) - case 8: - return __sync_add_and_fetch_8(addr, val); -#endif - } - _uatomic_link_error(); - return 0; -} - - -#define uatomic_add_return(addr, v) \ - ((__typeof__(*(addr))) _uatomic_add_return((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) -#endif /* #ifndef uatomic_add_return */ - -#ifndef uatomic_xchg -/* xchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - { - unsigned char old; - - do { - old = uatomic_read((unsigned char *)addr); - } while (!__sync_bool_compare_and_swap_1(addr, old, val)); - - return old; - } -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - { - unsigned short 
old; - - do { - old = uatomic_read((unsigned short *)addr); - } while (!__sync_bool_compare_and_swap_2(addr, old, val)); - - return old; - } -#endif - case 4: - { - unsigned int old; - - do { - old = uatomic_read((unsigned int *)addr); - } while (!__sync_bool_compare_and_swap_4(addr, old, val)); - - return old; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old; - - do { - old = uatomic_read((unsigned long *)addr); - } while (!__sync_bool_compare_and_swap_8(addr, old, val)); - - return old; - } -#endif - } - _uatomic_link_error(); - return 0; -} - -#define uatomic_xchg(addr, v) \ - ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ - sizeof(*(addr)))) -#endif /* #ifndef uatomic_xchg */ - -#else /* #ifndef uatomic_cmpxchg */ - -#ifndef uatomic_and -/* uatomic_and */ - -static inline __attribute__((always_inline)) -void _uatomic_and(void *addr, unsigned long val, int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - { - unsigned char old, oldt; - - oldt = uatomic_read((unsigned char *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old & val, 1); - } while (oldt != old); - - return; - } -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - { - unsigned short old, oldt; - - oldt = uatomic_read((unsigned short *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old & val, 2); - } while (oldt != old); - - return; - } -#endif - case 4: - { - unsigned int old, oldt; - - oldt = uatomic_read((unsigned int *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old & val, 4); - } while (oldt != old); - - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old, oldt; - - oldt = uatomic_read((unsigned long *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old & val, 8); - } while (oldt != old); - - return; - } -#endif - } - _uatomic_link_error(); -} - -#define uatomic_and(addr, v) \ - (_uatomic_and((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) -#endif /* #ifndef uatomic_and */ - -#ifndef uatomic_or -/* uatomic_or */ - -static inline __attribute__((always_inline)) -void _uatomic_or(void *addr, unsigned long val, int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - { - unsigned char old, oldt; - - oldt = uatomic_read((unsigned char *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old | val, 1); - } while (oldt != old); - - return; - } -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - { - unsigned short old, oldt; - - oldt = uatomic_read((unsigned short *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old | val, 2); - } while (oldt != old); - - return; - } -#endif - case 4: - { - unsigned int old, oldt; - - oldt = uatomic_read((unsigned int *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old | val, 4); - } while (oldt != old); - - return; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old, oldt; - - oldt = uatomic_read((unsigned long *)addr); - do { - old = oldt; - oldt = _uatomic_cmpxchg(addr, old, old | val, 8); - } while (oldt != old); - - return; - } -#endif - } - _uatomic_link_error(); -} - -#define uatomic_or(addr, v) \ - (_uatomic_or((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) -#endif /* #ifndef uatomic_or */ - -#ifndef uatomic_add_return -/* uatomic_add_return */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_add_return(void *addr, unsigned long val, int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - { - unsigned char old, oldt; - - oldt = uatomic_read((unsigned char *)addr); 
- do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned char *)addr, - old, old + val); - } while (oldt != old); - - return old + val; - } -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - { - unsigned short old, oldt; - - oldt = uatomic_read((unsigned short *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned short *)addr, - old, old + val); - } while (oldt != old); - - return old + val; - } -#endif - case 4: - { - unsigned int old, oldt; - - oldt = uatomic_read((unsigned int *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned int *)addr, - old, old + val); - } while (oldt != old); - - return old + val; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old, oldt; - - oldt = uatomic_read((unsigned long *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned long *)addr, - old, old + val); - } while (oldt != old); - - return old + val; - } -#endif - } - _uatomic_link_error(); - return 0; -} - -#define uatomic_add_return(addr, v) \ - ((__typeof__(*(addr))) _uatomic_add_return((addr), \ - (unsigned long)(v), \ - sizeof(*(addr)))) -#endif /* #ifndef uatomic_add_return */ - -#ifndef uatomic_xchg -/* xchg */ - -static inline __attribute__((always_inline)) -unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) -{ - switch (len) { -#ifdef UATOMIC_HAS_ATOMIC_BYTE - case 1: - { - unsigned char old, oldt; - - oldt = uatomic_read((unsigned char *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned char *)addr, - old, val); - } while (oldt != old); - - return old; - } -#endif -#ifdef UATOMIC_HAS_ATOMIC_SHORT - case 2: - { - unsigned short old, oldt; - - oldt = uatomic_read((unsigned short *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned short *)addr, - old, val); - } while (oldt != old); - - return old; - } -#endif - case 4: - { - unsigned int old, oldt; - - oldt = uatomic_read((unsigned int *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned int *)addr, - old, val); - } while (oldt != old); - - return old; - } -#if (CAA_BITS_PER_LONG == 64) - case 8: - { - unsigned long old, oldt; - - oldt = uatomic_read((unsigned long *)addr); - do { - old = oldt; - oldt = uatomic_cmpxchg((unsigned long *)addr, - old, val); - } while (oldt != old); - - return old; - } -#endif - } - _uatomic_link_error(); - return 0; -} - -#define uatomic_xchg(addr, v) \ - ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \ - sizeof(*(addr)))) -#endif /* #ifndef uatomic_xchg */ - -#endif /* #else #ifndef uatomic_cmpxchg */ - -/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */ - -#ifndef uatomic_add -#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v)) -#endif - -#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v)) -#define uatomic_sub(addr, v) uatomic_add((addr), -(v)) - -#ifndef uatomic_inc -#define uatomic_inc(addr) uatomic_add((addr), 1) -#endif - -#ifndef uatomic_dec -#define uatomic_dec(addr) uatomic_add((addr), -1) -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* _URCU_UATOMIC_GENERIC_H */ diff --git a/urcu/urcu_ref.h b/urcu/urcu_ref.h index 75620d1..a422a99 100644 --- a/urcu/urcu_ref.h +++ b/urcu/urcu_ref.h @@ -15,7 +15,7 @@ */ #include -#include +#include struct urcu_ref { long refcount; /* ATOMIC */
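---

A note on the technique in the uatomic_generic.h hunks above (the file reinstated as urcu/uatomic/generic.h): when an architecture header does not provide a native primitive, every operation is derived from uatomic_cmpxchg() with a read/compare-and-swap retry loop. A minimal standalone sketch of that pattern, assuming only the uatomic_read()/uatomic_cmpxchg() API shown in this diff; the name emulated_add_return is illustrative, not part of the library:

        /* Derive an atomic add-and-return from cmpxchg alone: re-read and
         * retry until no concurrent update slips in between the load and
         * the compare-and-swap. */
        static inline unsigned long emulated_add_return(unsigned long *addr,
                                                        unsigned long val)
        {
                unsigned long old, oldt;

                oldt = uatomic_read(addr);              /* initial snapshot */
                do {
                        old = oldt;
                        /* publish old + val only if *addr still equals old */
                        oldt = uatomic_cmpxchg(addr, old, old + val);
                } while (oldt != old);                  /* lost a race: retry */

                return old + val;       /* value after the successful update */
        }

The loop is lock-free in the same sense as the generic header: a retry only happens when some other thread's update succeeded in the meantime.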
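After this rename, client code reaches the per-architecture atomics through the single installed urcu/uatomic.h header (linked by configure.ac to urcu/uatomic/$ARCHTYPE.h). A short usage sketch, with a hypothetical application-side counter (hits) for illustration:

        #include <urcu/uatomic.h>       /* formerly <urcu/uatomic_arch.h> */

        static unsigned long hits;

        void record_hit(void)
        {
                uatomic_inc(&hits);             /* lock-free increment */
        }

        unsigned long snapshot_hits(void)
        {
                return uatomic_read(&hits);     /* atomic read of the counter */
        }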