Headers: move uatomic_*.h to urcu/uatomic/*.h, rename uatomic_arch.h to uatomic.h
author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Fri, 10 Jun 2011 19:58:34 +0000 (15:58 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Fri, 10 Jun 2011 19:58:34 +0000 (15:58 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
35 files changed:
Makefile.am
compat_arch_x86.c
configure.ac
tests/test_uatomic.c
tests/urcutorture.c
urcu-defer-impl.h
urcu-pointer.c
urcu-pointer.h
urcu/static/rculfqueue.h
urcu/static/rculfstack.h
urcu/static/urcu-bp.h
urcu/static/urcu-pointer.h
urcu/static/urcu-qsbr.h
urcu/static/urcu.h
urcu/static/wfqueue.h
urcu/static/wfstack.h
urcu/uatomic/alpha.h [new file with mode: 0644]
urcu/uatomic/arm.h [new file with mode: 0644]
urcu/uatomic/gcc.h [new file with mode: 0644]
urcu/uatomic/generic.h [new file with mode: 0644]
urcu/uatomic/ppc.h [new file with mode: 0644]
urcu/uatomic/s390.h [new file with mode: 0644]
urcu/uatomic/sparc64.h [new file with mode: 0644]
urcu/uatomic/unknown.h [new file with mode: 0644]
urcu/uatomic/x86.h [new file with mode: 0644]
urcu/uatomic_arch_alpha.h [deleted file]
urcu/uatomic_arch_arm.h [deleted file]
urcu/uatomic_arch_gcc.h [deleted file]
urcu/uatomic_arch_ppc.h [deleted file]
urcu/uatomic_arch_s390.h [deleted file]
urcu/uatomic_arch_sparc64.h [deleted file]
urcu/uatomic_arch_unknown.h [deleted file]
urcu/uatomic_arch_x86.h [deleted file]
urcu/uatomic_generic.h [deleted file]
urcu/urcu_ref.h
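
The user-visible effect of this rename is a single include-path change; the uatomic API itself is untouched. A minimal sketch of a caller, before and after (illustrative only, not part of this patch):

    /* Before this commit: */
    #include <urcu/uatomic_arch.h>

    /* After this commit: */
    #include <urcu/uatomic.h>

    /* All uatomic_*() operations keep their existing names and semantics. */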

diff --git a/Makefile.am b/Makefile.am
index e85c924e1ca499a4fafa3e1166249d5e389a88d2..53268e5cc487525dabe46646c5020a1cd70feeb2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -8,12 +8,12 @@ SUBDIRS = . tests
 include_HEADERS = urcu.h $(top_srcdir)/urcu-*.h
 nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h \
                urcu/rculist.h urcu/rcuhlist.h urcu/system.h urcu/urcu-futex.h \
-               urcu/uatomic_generic.h urcu/arch/generic.h urcu/wfstack.h \
+               urcu/uatomic/generic.h urcu/arch/generic.h urcu/wfstack.h \
                urcu/wfqueue.h urcu/rculfstack.h urcu/rculfqueue.h \
                urcu/urcu_ref.h urcu/map/*.h urcu/static/*.h
-nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic_arch.h urcu/config.h
+nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic.h urcu/config.h
 
-EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic_arch_*.h \
+EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic/*.h \
                gpl-2.0.txt lgpl-2.1.txt lgpl-relicensing.txt \
                README LICENSE compat_arch_x86.c
 
diff --git a/compat_arch_x86.c b/compat_arch_x86.c
index 692417ee5067607293ef3c9129f34f8a8b7bb739..9cbd3c8c808d794ac5ad63292479b67e99a96ed4 100644
--- a/compat_arch_x86.c
+++ b/compat_arch_x86.c
@@ -24,7 +24,7 @@
 #include <pthread.h>
 #include <signal.h>
 #include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 /*
  * It does not really matter if the constructor is called before using
diff --git a/configure.ac b/configure.ac
index 85123a1ee28432bf851507041196002140add345..a885fd97cbea3aab19bb4d09c86f990df1c0754d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -81,7 +81,7 @@ asm volatile("dmb":::"memory");
 )
 fi
 
-UATOMICSRC=urcu/uatomic_arch_$ARCHTYPE.h
+UATOMICSRC=urcu/uatomic/$ARCHTYPE.h
 ARCHSRC=urcu/arch/$ARCHTYPE.h
 if test "x$ARCHTYPE" != xx86 -a "x$ARCHTYPE" != xppc; then
        APISRC=tests/api_gcc.h
@@ -240,7 +240,7 @@ CFLAGS=$saved_CFLAGS
 
 AC_CONFIG_LINKS([
        urcu/arch.h:$ARCHSRC
-       urcu/uatomic_arch.h:$UATOMICSRC
+       urcu/uatomic.h:$UATOMICSRC
        tests/api.h:$APISRC
 ])
 AC_CONFIG_FILES([
diff --git a/tests/test_uatomic.c b/tests/test_uatomic.c
index 2c8c2324c3d658aa4ea1f0a3f3caf9c8c84dbf0d..692060bcdf175763e2290c6c207d7d66366928c9 100644
--- a/tests/test_uatomic.c
+++ b/tests/test_uatomic.c
@@ -1,6 +1,6 @@
 #include <stdio.h>
 #include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 struct testvals {
        unsigned char c;
diff --git a/tests/urcutorture.c b/tests/urcutorture.c
index a098d875d60b8a6e76120f2d9f83f890951e3a4d..5e9b0596fc1570a1b909d78687cb8c36c10419ce 100644
--- a/tests/urcutorture.c
+++ b/tests/urcutorture.c
@@ -24,6 +24,6 @@
 #include <urcu-bp.h>
 #endif
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include <urcu/rculist.h>
 #include "rcutorture.h"
diff --git a/urcu-defer-impl.h b/urcu-defer-impl.h
index 0aedd53011cc70f5dfc626dc4380caa8aaca823b..8dcd114b573b4067a57960b89df8014f4e43e6f9 100644
--- a/urcu-defer-impl.h
+++ b/urcu-defer-impl.h
@@ -45,7 +45,7 @@
 
 #include <urcu/compiler.h>
 #include <urcu/arch.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include <urcu/list.h>
 #include <urcu/system.h>
 
diff --git a/urcu-pointer.c b/urcu-pointer.c
index 7dfb53a693099c80fc100e0063caea97dad98a77..45dad2b276ba589b04794f7ce663c9bf1f6a76ba 100644
--- a/urcu-pointer.c
+++ b/urcu-pointer.c
@@ -24,7 +24,7 @@
  * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #include "urcu/static/urcu-pointer.h"
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
diff --git a/urcu-pointer.h b/urcu-pointer.h
index 359a99f9c574cde3f0f6d0587dc7145ca452a80c..027a18fd4d0b57159a096e5a575d1618ddcfe364 100644
--- a/urcu-pointer.h
+++ b/urcu-pointer.h
@@ -28,7 +28,7 @@
 
 #include <urcu/compiler.h>
 #include <urcu/arch.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/urcu/static/rculfqueue.h b/urcu/static/rculfqueue.h
index 410a4cf8d820714dbc88673db078ef89b1545c45..75df98582e00f5cc94ded1b29af2a0bc8867cf09 100644
--- a/urcu/static/rculfqueue.h
+++ b/urcu/static/rculfqueue.h
@@ -27,7 +27,7 @@
  */
 
 #include <urcu/urcu_ref.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include <assert.h>
 /* A urcu implementation header should be already included. */
 
diff --git a/urcu/static/rculfstack.h b/urcu/static/rculfstack.h
index 7caf3c8c0d15f344170c33873f4cb6f4b4d107fa..99d3d4ccc92f8cd22011d099417986e274e08d73 100644
--- a/urcu/static/rculfstack.h
+++ b/urcu/static/rculfstack.h
@@ -26,7 +26,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 /* A urcu implementation header should be already included. */
 
 #ifdef __cplusplus
diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index 14c6cfecac3af01a3bc4680c36381736f37a30f7..cca8d6ac2bc40ae9e832d6475dfa6d89933e7d1d 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -37,7 +37,7 @@
 #include <urcu/compiler.h>
 #include <urcu/arch.h>
 #include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include <urcu/list.h>
 
 /*
diff --git a/urcu/static/urcu-pointer.h b/urcu/static/urcu-pointer.h
index b6444860f51143dca1bb3d5eb34e4bbd4a6ce1cd..acd7cee1f0677554794f238e945715e1e4bd3b10 100644
--- a/urcu/static/urcu-pointer.h
+++ b/urcu/static/urcu-pointer.h
@@ -32,7 +32,7 @@
 #include <urcu/compiler.h>
 #include <urcu/arch.h>
 #include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/urcu/static/urcu-qsbr.h b/urcu/static/urcu-qsbr.h
index e0b12be15c2ebdf540446dbbaa16cdce8ff5f4fe..f189e31ad757fdf679dc9d5b941c988b813e9041 100644
--- a/urcu/static/urcu-qsbr.h
+++ b/urcu/static/urcu-qsbr.h
@@ -39,7 +39,7 @@
 #include <urcu/compiler.h>
 #include <urcu/arch.h>
 #include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include <urcu/list.h>
 #include <urcu/urcu-futex.h>
 
diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index 18e4826a5a31789c7cb408832421cbddb7d83c22..289827248c8405aa41918b6b7619dc80f0bd7489 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -37,7 +37,7 @@
 #include <urcu/compiler.h>
 #include <urcu/arch.h>
 #include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 #include <urcu/list.h>
 #include <urcu/urcu-futex.h>
 
diff --git a/urcu/static/wfqueue.h b/urcu/static/wfqueue.h
index 790931bef25db04d2156fe0cd39dd260b31a0474..77828ca1d8c046f41d3da76c8f7d5b0253e076ba 100644
--- a/urcu/static/wfqueue.h
+++ b/urcu/static/wfqueue.h
@@ -30,7 +30,7 @@
 #include <assert.h>
 #include <poll.h>
 #include <urcu/compiler.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/urcu/static/wfstack.h b/urcu/static/wfstack.h
index ff18c4a3f213214188ef6bfe6f6c98a795132bab..454240f2325e4224d6625a32232463ee74fe6935 100644
--- a/urcu/static/wfstack.h
+++ b/urcu/static/wfstack.h
@@ -30,7 +30,7 @@
 #include <assert.h>
 #include <poll.h>
 #include <urcu/compiler.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/urcu/uatomic/alpha.h b/urcu/uatomic/alpha.h
new file mode 100644
index 0000000..5dceb90
--- /dev/null
+++ b/urcu/uatomic/alpha.h
@@ -0,0 +1,32 @@
+#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
+#define _URCU_UATOMIC_ARCH_ALPHA_H
+
+/*
+ * Atomic exchange operations for the Alpha architecture. Let GCC do it.
+ *
+ * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
diff --git a/urcu/uatomic/arm.h b/urcu/uatomic/arm.h
new file mode 100644
index 0000000..e0016b8
--- /dev/null
+++ b/urcu/uatomic/arm.h
@@ -0,0 +1,43 @@
+#ifndef _URCU_ARCH_UATOMIC_ARM_H
+#define _URCU_ARCH_UATOMIC_ARM_H
+
+/* 
+ * Atomics for ARM.  This approach is usable on kernels back to 2.6.15.
+ *
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
+ *                        (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+/* xchg */
+#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
+
+#ifdef __cplusplus 
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_ARM_H */
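
A minimal sketch (illustrative, not part of this patch) of what the ARM definition above gives callers: uatomic_xchg() expands directly to GCC's __sync_lock_test_and_set() builtin, an atomic exchange.

    #include <urcu/uatomic.h>

    /* Atomically store 1 into *flag and return the previous value. On ARM
     * this compiles down to the __sync_lock_test_and_set() builtin per the
     * macro above; try_lock_word() is a hypothetical caller. */
    static unsigned long try_lock_word(unsigned long *flag)
    {
            return uatomic_xchg(flag, 1UL);
    }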
diff --git a/urcu/uatomic/gcc.h b/urcu/uatomic/gcc.h
new file mode 100644
index 0000000..47ca195
--- /dev/null
+++ b/urcu/uatomic/gcc.h
@@ -0,0 +1,46 @@
+#ifndef _URCU_ARCH_UATOMIC_GCC_H
+#define _URCU_ARCH_UATOMIC_GCC_H
+
+/* 
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
+ *                        (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+/*
+ * If your platform doesn't have a full set of atomics, you will need
+ * a separate urcu/uatomic/*.h file for your architecture.  Otherwise,
+ * just rely on the definitions in uatomic/generic.h.
+ */
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus 
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_GCC_H */
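
The two feature flags above are what gate the sub-word cases in uatomic/generic.h: with them defined, the 1- and 2-byte branches of the size switches compile in; without them, a sub-word operand falls through to _uatomic_link_error(). A sketch of the effect (hypothetical caller, not from this patch):

    #include <urcu/uatomic.h>

    static unsigned short bump(unsigned short *counter)
    {
            /* With UATOMIC_HAS_ATOMIC_SHORT defined, this resolves to
             * __sync_add_and_fetch_2(); without it, the 2-byte case is
             * compiled out and this call would trip _uatomic_link_error(). */
            return uatomic_add_return(counter, 1);
    }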
diff --git a/urcu/uatomic/generic.h b/urcu/uatomic/generic.h
new file mode 100644
index 0000000..337fe40
--- /dev/null
+++ b/urcu/uatomic/generic.h
@@ -0,0 +1,557 @@
+#ifndef _URCU_UATOMIC_GENERIC_H
+#define _URCU_UATOMIC_GENERIC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ * Copyright (c) 2010      Paolo Bonzini
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef uatomic_set
+#define uatomic_set(addr, v)   CMM_STORE_SHARED(*(addr), (v))
+#endif
+
+#ifndef uatomic_read
+#define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
+#endif
+
+#if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
+static inline __attribute__((always_inline))
+void _uatomic_link_error()
+{
+#ifdef ILLEGAL_INSTR
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+#else
+       __builtin_trap ();
+#endif
+}
+
+#else /* #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
+extern void _uatomic_link_error ();
+#endif /* #else #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
+
+/* cmpxchg */
+
+#ifndef uatomic_cmpxchg
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+                             unsigned long _new, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+               return __sync_val_compare_and_swap_1(addr, old, _new);
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+               return __sync_val_compare_and_swap_2(addr, old, _new);
+#endif
+       case 4:
+               return __sync_val_compare_and_swap_4(addr, old, _new);
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+               return __sync_val_compare_and_swap_8(addr, old, _new);
+#endif
+       }
+       _uatomic_link_error();
+       return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new)                                   \
+       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),      \
+                                               sizeof(*(addr))))
+
+
+/* uatomic_and */
+
+#ifndef uatomic_and
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val,
+                 int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+               __sync_and_and_fetch_1(addr, val);
+               return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+               __sync_and_and_fetch_2(addr, val);
+               return;
+#endif
+       case 4:
+               __sync_and_and_fetch_4(addr, val);
+               return;
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+               __sync_and_and_fetch_8(addr, val);
+               return;
+#endif
+       }
+       _uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)                   \
+       (_uatomic_and((addr),                   \
+                     (unsigned long)(v),       \
+                     sizeof(*(addr))))
+#endif
+
+/* uatomic_or */
+
+#ifndef uatomic_or
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val,
+                int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+               __sync_or_and_fetch_1(addr, val);
+               return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+               __sync_or_and_fetch_2(addr, val);
+               return;
+#endif
+       case 4:
+               __sync_or_and_fetch_4(addr, val);
+               return;
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+               __sync_or_and_fetch_8(addr, val);
+               return;
+#endif
+       }
+       _uatomic_link_error();
+}
+
+#define uatomic_or(addr, v)                    \
+       (_uatomic_or((addr),                    \
+                    (unsigned long)(v),        \
+                    sizeof(*(addr))))
+#endif
+
+/* uatomic_add_return */
+
+#ifndef uatomic_add_return
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+                                int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+               return __sync_add_and_fetch_1(addr, val);
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+               return __sync_add_and_fetch_2(addr, val);
+#endif
+       case 4:
+               return __sync_add_and_fetch_4(addr, val);
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+               return __sync_add_and_fetch_8(addr, val);
+#endif
+       }
+       _uatomic_link_error();
+       return 0;
+}
+
+
+#define uatomic_add_return(addr, v)                                    \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+#endif /* #ifndef uatomic_add_return */
+
+#ifndef uatomic_xchg
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+       {
+               unsigned char old;
+
+               do {
+                       old = uatomic_read((unsigned char *)addr);
+               } while (!__sync_bool_compare_and_swap_1(addr, old, val));
+
+               return old;
+       }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+       {
+               unsigned short old;
+
+               do {
+                       old = uatomic_read((unsigned short *)addr);
+               } while (!__sync_bool_compare_and_swap_2(addr, old, val));
+
+               return old;
+       }
+#endif
+       case 4:
+       {
+               unsigned int old;
+
+               do {
+                       old = uatomic_read((unsigned int *)addr);
+               } while (!__sync_bool_compare_and_swap_4(addr, old, val));
+
+               return old;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old;
+
+               do {
+                       old = uatomic_read((unsigned long *)addr);
+               } while (!__sync_bool_compare_and_swap_8(addr, old, val));
+
+               return old;
+       }
+#endif
+       }
+       _uatomic_link_error();
+       return 0;
+}
+
+#define uatomic_xchg(addr, v)                                              \
+       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+                                               sizeof(*(addr))))
+#endif /* #ifndef uatomic_xchg */
+
+#else /* #ifndef uatomic_cmpxchg */
+
+#ifndef uatomic_and
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+       {
+               unsigned char old, oldt;
+
+               oldt = uatomic_read((unsigned char *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
+               } while (oldt != old);
+               return;
+       }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+       {
+               unsigned short old, oldt;
+
+               oldt = uatomic_read((unsigned short *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
+               } while (oldt != old);
+               return;
+       }
+#endif
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read((unsigned int *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
+               } while (oldt != old);
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read((unsigned long *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
+               } while (oldt != old);
+               return;
+       }
+#endif
+       }
+       _uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)                   \
+       (_uatomic_and((addr),                   \
+                     (unsigned long)(v),       \
+                     sizeof(*(addr))))
+#endif /* #ifndef uatomic_and */
+
+#ifndef uatomic_or
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+       {
+               unsigned char old, oldt;
+
+               oldt = uatomic_read((unsigned char *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
+               } while (oldt != old);
+               return;
+       }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+       {
+               unsigned short old, oldt;
+
+               oldt = uatomic_read((unsigned short *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
+               } while (oldt != old);
+               return;
+       }
+#endif
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read((unsigned int *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
+               } while (oldt != old);
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read((unsigned long *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
+               } while (oldt != old);
+               return;
+       }
+#endif
+       }
+       _uatomic_link_error();
+}
+
+#define uatomic_or(addr, v)                    \
+       (_uatomic_or((addr),                    \
+                    (unsigned long)(v),        \
+                    sizeof(*(addr))))
+#endif /* #ifndef uatomic_or */
+
+#ifndef uatomic_add_return
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+       {
+               unsigned char old, oldt;
+
+               oldt = uatomic_read((unsigned char *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned char *)addr,
+                                               old, old + val);
+               } while (oldt != old);
+
+               return old + val;
+       }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+       {
+               unsigned short old, oldt;
+
+               oldt = uatomic_read((unsigned short *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned short *)addr,
+                                               old, old + val);
+               } while (oldt != old);
+
+               return old + val;
+       }
+#endif
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read((unsigned int *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned int *)addr,
+                                               old, old + val);
+               } while (oldt != old);
+
+               return old + val;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read((unsigned long *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned long *)addr,
+                                               old, old + val);
+               } while (oldt != old);
+
+               return old + val;
+       }
+#endif
+       }
+       _uatomic_link_error();
+       return 0;
+}
+
+#define uatomic_add_return(addr, v)                                    \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+#endif /* #ifndef uatomic_add_return */
+
+#ifndef uatomic_xchg
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+       case 1:
+       {
+               unsigned char old, oldt;
+
+               oldt = uatomic_read((unsigned char *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned char *)addr,
+                                               old, val);
+               } while (oldt != old);
+
+               return old;
+       }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+       case 2:
+       {
+               unsigned short old, oldt;
+
+               oldt = uatomic_read((unsigned short *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned short *)addr,
+                                               old, val);
+               } while (oldt != old);
+
+               return old;
+       }
+#endif
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read((unsigned int *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned int *)addr,
+                                               old, val);
+               } while (oldt != old);
+
+               return old;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read((unsigned long *)addr);
+               do {
+                       old = oldt;
+                       oldt = uatomic_cmpxchg((unsigned long *)addr,
+                                               old, val);
+               } while (oldt != old);
+
+               return old;
+       }
+#endif
+       }
+       _uatomic_link_error();
+       return 0;
+}
+
+#define uatomic_xchg(addr, v)                                              \
+       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+                                               sizeof(*(addr))))
+#endif /* #ifndef uatomic_xchg */
+
+#endif /* #else #ifndef uatomic_cmpxchg */
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#ifndef uatomic_add
+#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
+#endif
+
+#define uatomic_sub_return(addr, v)    uatomic_add_return((addr), -(v))
+#define uatomic_sub(addr, v)           uatomic_add((addr), -(v))
+
+#ifndef uatomic_inc
+#define uatomic_inc(addr)              uatomic_add((addr), 1)
+#endif
+
+#ifndef uatomic_dec
+#define uatomic_dec(addr)              uatomic_add((addr), -1)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_UATOMIC_GENERIC_H */
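
A short, self-contained usage sketch of the API this header provides (hypothetical caller; assumes <urcu/uatomic.h> resolves to an arch header that pulls in uatomic/generic.h, as the headers in this commit do):

    #include <urcu/uatomic.h>

    static unsigned long counter;

    static void counter_demo(void)
    {
            unsigned long old, seen;

            uatomic_set(&counter, 0);
            uatomic_add(&counter, 2);
            uatomic_inc(&counter);          /* counter is now 3 */

            /* Classic cmpxchg retry loop: the same pattern generic.h uses
             * internally to build and/or/add_return on top of cmpxchg. */
            seen = uatomic_read(&counter);
            do {
                    old = seen;
                    seen = uatomic_cmpxchg(&counter, old, old * 2);
            } while (seen != old);
    }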
diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h
new file mode 100644
index 0000000..3eb3d63
--- /dev/null
+++ b/urcu/uatomic/ppc.h
@@ -0,0 +1,219 @@
+#ifndef _URCU_ARCH_UATOMIC_PPC_H
+#define _URCU_ARCH_UATOMIC_PPC_H
+
+/* 
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE  "sync\n"
+#else
+#define LWSYNC_OPCODE  "lwsync\n"
+#endif
+
+#define ILLEGAL_INSTR  ".long  0xd00d00"
+
+/*
+ * Using an isync as the second barrier for exchange provides acquire
+ * semantics. According to uatomic_ops/sysdeps/gcc/powerpc.h, the
+ * documentation is "fairly explicit that this also has acquire semantics."
+ * Derived from AO_compare_and_swap(), but with the comparison removed.
+ */
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int result;
+
+               __asm__ __volatile__(
+                       LWSYNC_OPCODE
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "stwcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result;
+
+               __asm__ __volatile__(
+                       LWSYNC_OPCODE
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "stdcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
+}
+
+#define uatomic_xchg(addr, v)                                              \
+       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+                                               sizeof(*(addr))))
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+                             unsigned long _new, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int old_val;
+
+               __asm__ __volatile__(
+                       LWSYNC_OPCODE
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "cmpw %0,%3\n"          /* if load is not equal to */
+                       "bne 2f\n"              /* old, fail */
+                       "stwcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+               "2:\n"
+                               : "=&r"(old_val)
+                               : "r"(addr), "r"((unsigned int)_new),
+                                 "r"((unsigned int)old)
+                               : "memory", "cc");
+
+               return old_val;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old_val;
+
+               __asm__ __volatile__(
+                       LWSYNC_OPCODE
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "cmpd %0,%3\n"          /* if load is not equal to */
+                       "bne 2f\n"              /* old, fail */
+                       "stdcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+               "2:\n"
+                               : "=&r"(old_val)
+                               : "r"(addr), "r"((unsigned long)_new),
+                                 "r"((unsigned long)old)
+                               : "memory", "cc");
+
+               return old_val;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new)                                   \
+       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),      \
+                                               sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+                                int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int result;
+
+               __asm__ __volatile__(
+                       LWSYNC_OPCODE
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "add %0,%2,%0\n"        /* add val to value loaded */
+                       "stwcx. %0,0,%1\n"      /* store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result;
+
+               __asm__ __volatile__(
+                       LWSYNC_OPCODE
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "add %0,%2,%0\n"        /* add val to value loaded */
+                       "stdcx. %0,0,%1\n"      /* store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
+}
+
+
+#define uatomic_add_return(addr, v)                                    \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+
+#ifdef __cplusplus 
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_PPC_H */
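
For readers less familiar with PowerPC, a C-level sketch (not from this patch) of the lwarx/stwcx. pattern the functions above rely on: the conditional store fails if the reservation taken by the load was lost to another CPU in between, and the loop retries.

    /* load_reserve() stands in for lwarx/ldarx and store_conditional() for
     * stwcx./stdcx.; both are hypothetical names used for illustration. */
    static unsigned int ll_sc_xchg(unsigned int *addr, unsigned int val,
                    unsigned int (*load_reserve)(unsigned int *),
                    int (*store_conditional)(unsigned int *, unsigned int))
    {
            unsigned int old;

            do {
                    old = load_reserve(addr);
            } while (!store_conditional(addr, val));
            return old;
    }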
diff --git a/urcu/uatomic/s390.h b/urcu/uatomic/s390.h
new file mode 100644
index 0000000..b274c1c
--- /dev/null
+++ b/urcu/uatomic/s390.h
@@ -0,0 +1,160 @@
+#ifndef _URCU_UATOMIC_ARCH_S390_H
+#define _URCU_UATOMIC_ARCH_S390_H
+
+/*
+ * Atomic exchange operations for the S390 architecture. Based on information
+ * taken from the Principles of Operation Appendix A "Conditional Swapping
+ * Instructions (CS, CDS)".
+ *
+ * Copyright (c) 2009 Novell, Inc.
+ * Author: Jan Blunck <jblunck@suse.de>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+#define COMPILER_HAVE_SHORT_MEM_OPERAND
+#endif
+
+/*
+ * MEMOP assembler operand rules:
+ * - "op" refers to the MEMOP_IN operand
+ * - MEMOP_IN can expand to more than a single operand. Use it only at the
+ *   end of the operand list.
+ */
+
+#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
+
+#define MEMOP_OUT(addr)        "=Q" (*(addr))
+#define MEMOP_IN(addr) "Q" (*(addr))
+#define MEMOP_REF(op)  #op             /* op refers to the MEMOP_IN operand */
+
+#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+#define MEMOP_OUT(addr)        "=m" (*(addr))
+#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
+#define MEMOP_REF(op)  "0(" #op ")"    /* op refers to the MEMOP_IN operand */
+
+#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+struct __uatomic_dummy {
+       unsigned long v[10];
+};
+#define __hp(x)        ((struct __uatomic_dummy *)(x))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int old_val;
+
+               __asm__ __volatile__(
+                       "0:     cs %0,%2," MEMOP_REF(%3) "\n"
+                       "       brc 4,0b\n"
+                       : "=&r" (old_val), MEMOP_OUT (__hp(addr))
+                       : "r" (val), MEMOP_IN (__hp(addr))
+                       : "memory", "cc");
+               return old_val;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old_val;
+
+               __asm__ __volatile__(
+                       "0:     csg %0,%2," MEMOP_REF(%3) "\n"
+                       "       brc 4,0b\n"
+                       : "=&r" (old_val), MEMOP_OUT (__hp(addr))
+                       : "r" (val), MEMOP_IN (__hp(addr))
+                       : "memory", "cc");
+               return old_val;
+       }
+#endif
+       default:
+               __asm__ __volatile__(".long     0xd00d00");
+       }
+
+       return 0;
+}
+
+#define uatomic_xchg(addr, v)                                              \
+       (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+                                              sizeof(*(addr)))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+                              unsigned long _new, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int old_val = (unsigned int)old;
+
+               __asm__ __volatile__(
+                       "       cs %0,%2," MEMOP_REF(%3) "\n"
+                       : "+r" (old_val), MEMOP_OUT (__hp(addr))
+                       : "r" (_new), MEMOP_IN (__hp(addr))
+                       : "memory", "cc");
+               return old_val;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+                       "       csg %0,%2," MEMOP_REF(%3) "\n"
+                       : "+r" (old), MEMOP_OUT (__hp(addr))
+                       : "r" (_new), MEMOP_IN (__hp(addr))
+                       : "memory", "cc");
+               return old;
+       }
+#endif
+       default:
+               __asm__ __volatile__(".long     0xd00d00");
+       }
+
+       return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, _new)                               \
+       (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                  \
+                                              (unsigned long)(old),    \
+                                              (unsigned long)(_new),   \
+                                              sizeof(*(addr)))
+
+#ifdef __cplusplus 
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_S390_H */
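
A C-level sketch (not from this patch) of the "cs ...; brc 4,0b" exchange loop above: on a mismatch, CS reloads the first operand with the value actually found, so the retry can reuse it directly. __sync_val_compare_and_swap() stands in for the CS instruction here, since it too returns the value seen at *addr.

    static unsigned int xchg_via_cas(unsigned int *addr, unsigned int val)
    {
            unsigned int old = *addr, seen;

            /* Retry until the compare-and-swap saw the value we expected. */
            while ((seen = __sync_val_compare_and_swap(addr, old, val)) != old)
                    old = seen;
            return old;
    }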
diff --git a/urcu/uatomic/sparc64.h b/urcu/uatomic/sparc64.h
new file mode 100644
index 0000000..d9ecada
--- /dev/null
+++ b/urcu/uatomic/sparc64.h
@@ -0,0 +1,80 @@
+#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
+#define _URCU_ARCH_UATOMIC_SPARC64_H
+
+/* 
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+                             unsigned long _new, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               __asm__ __volatile__ (
+                       "membar #StoreLoad | #LoadLoad\n\t"
+                        "cas [%1],%2,%0\n\t"
+                        "membar #StoreLoad | #StoreStore\n\t"
+                        : "+&r" (_new)
+                        : "r" (addr), "r" (old)
+                        : "memory");
+
+               return _new;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__ (
+                       "membar #StoreLoad | #LoadLoad\n\t"
+                        "casx [%1],%2,%0\n\t"
+                        "membar #StoreLoad | #StoreStore\n\t"
+                        : "+&r" (_new)
+                        : "r" (addr), "r" (old)
+                        : "memory");
+
+               return _new;
+       }
+#endif
+       }
+       __builtin_trap();
+       return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new)                                   \
+       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),      \
+                                               sizeof(*(addr))))
+
+#ifdef __cplusplus 
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
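
As on the other ports, _uatomic_cmpxchg() here returns the value found at *addr (the cas instruction writes it back into the _new register), so callers detect success by comparing the return value against the expected old value. A sketch (hypothetical caller, not from this patch):

    #include <urcu/uatomic.h>

    /* Claim ownership if currently unowned; returns nonzero on success. */
    static int try_claim(unsigned long *owner, unsigned long self)
    {
            return uatomic_cmpxchg(owner, 0UL, self) == 0UL;
    }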
diff --git a/urcu/uatomic/unknown.h b/urcu/uatomic/unknown.h
new file mode 100644
index 0000000..6fb4eb3
--- /dev/null
+++ b/urcu/uatomic/unknown.h
@@ -0,0 +1,25 @@
+#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
+#define _URCU_ARCH_UATOMIC_UNKNOWN_H
+
+/* 
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
+ *                        (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* See configure.ac for the list of recognized architectures.  */
+#error "Cannot build: unrecognized architecture detected."
+
+#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */
diff --git a/urcu/uatomic/x86.h b/urcu/uatomic/x86.h
new file mode 100644
index 0000000..b4c108f
--- /dev/null
+++ b/urcu/uatomic/x86.h
@@ -0,0 +1,596 @@
+#ifndef _URCU_ARCH_UATOMIC_X86_H
+#define _URCU_ARCH_UATOMIC_X86_H
+
+/* 
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+/*
+ * Derived from AO_compare_and_swap() and AO_test_and_set_full().
+ */
+
+struct __uatomic_dummy {
+       unsigned long v[10];
+};
+#define __hp(x)        ((struct __uatomic_dummy *)(x))
+
+#define _uatomic_set(addr, v)  CMM_STORE_SHARED(*(addr), (v))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
+                             unsigned long _new, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               unsigned char result = old;
+
+               __asm__ __volatile__(
+               "lock; cmpxchgb %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "q"((unsigned char)_new)
+                       : "memory");
+               return result;
+       }
+       case 2:
+       {
+               unsigned short result = old;
+
+               __asm__ __volatile__(
+               "lock; cmpxchgw %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "r"((unsigned short)_new)
+                       : "memory");
+               return result;
+       }
+       case 4:
+       {
+               unsigned int result = old;
+
+               __asm__ __volatile__(
+               "lock; cmpxchgl %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "r"((unsigned int)_new)
+                       : "memory");
+               return result;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result = old;
+
+               __asm__ __volatile__(
+               "lock; cmpxchgq %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "r"((unsigned long)_new)
+                       : "memory");
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return 0;
+}
+
+#define _uatomic_cmpxchg(addr, old, _new)                                    \
+       ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),        \
+                                               sizeof(*(addr))))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
+{
+       /* Note: the "xchg" instruction does not need a "lock" prefix. */
+       switch (len) {
+       case 1:
+       {
+               unsigned char result;
+               __asm__ __volatile__(
+               "xchgb %0, %1"
+                       : "=q"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned char)val)
+                       : "memory");
+               return result;
+       }
+       case 2:
+       {
+               unsigned short result;
+               __asm__ __volatile__(
+               "xchgw %0, %1"
+                       : "=r"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned short)val)
+                       : "memory");
+               return result;
+       }
+       case 4:
+       {
+               unsigned int result;
+               __asm__ __volatile__(
+               "xchgl %0, %1"
+                       : "=r"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned int)val)
+                       : "memory");
+               return result;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result;
+               __asm__ __volatile__(
+               "xchgq %0, %1"
+                       : "=r"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned long)val)
+                       : "memory");
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return 0;
+}
+
+#define _uatomic_xchg(addr, v)                                               \
+       ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
+                                               sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_add_return(void *addr, unsigned long val,
+                                int len)
+{
+       switch (len) {
+       case 1:
+       {
+               unsigned char result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddb %1, %0"
+                       : "+m"(*__hp(addr)), "+q" (result)
+                       :
+                       : "memory");
+               return result + (unsigned char)val;
+       }
+       case 2:
+       {
+               unsigned short result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddw %1, %0"
+                       : "+m"(*__hp(addr)), "+r" (result)
+                       :
+                       : "memory");
+               return result + (unsigned short)val;
+       }
+       case 4:
+       {
+               unsigned int result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddl %1, %0"
+                       : "+m"(*__hp(addr)), "+r" (result)
+                       :
+                       : "memory");
+               return result + (unsigned int)val;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddq %1, %0"
+                       : "+m"(*__hp(addr)), "+r" (result)
+                       :
+                       : "memory");
+               return result + (unsigned long)val;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return 0;
+}
+
+#define _uatomic_add_return(addr, v)                                   \
+       ((__typeof__(*(addr))) __uatomic_add_return((addr),             \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; andb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; andw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; andl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; andq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_and(addr, v)                                             \
+       (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; orb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; orw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; orl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; orq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_or(addr, v)                                              \
+       (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_add */
+
+static inline __attribute__((always_inline))
+void __uatomic_add(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; addb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; addw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; addl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; addq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_add(addr, v)                                             \
+       (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+
+
+/* uatomic_inc */
+
+static inline __attribute__((always_inline))
+void __uatomic_inc(void *addr, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; incb %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; incw %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; incl %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; incq %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_inc(addr)     (__uatomic_inc((addr), sizeof(*(addr))))
+
+/* uatomic_dec */
+
+static inline __attribute__((always_inline))
+void __uatomic_dec(void *addr, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; decb %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; decw %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; decl %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#if (CAA_BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; decq %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define _uatomic_dec(addr)     (__uatomic_dec((addr), sizeof(*(addr))))
+
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
+
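+/*
+ * Compat dispatch: __rcu_cas_avail > 0 means cmpxchg support was
+ * detected and the native _uatomic_* implementation is used; < 0 means
+ * it has not been probed yet, so probe once via __rcu_cas_init();
+ * 0 means the CPU lacks cmpxchg, so fall back to the out-of-line
+ * compat_uatomic_* emulation declared below.
+ */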
+#define UATOMIC_COMPAT(insn)                                                   \
+       ((likely(__rcu_cas_avail > 0))                                          \
+       ? (_uatomic_##insn)                                                     \
+               : ((unlikely(__rcu_cas_avail < 0)                               \
+                       ? ((__rcu_cas_init() > 0)                               \
+                               ? (_uatomic_##insn)                             \
+                               : (compat_uatomic_##insn))                      \
+                       : (compat_uatomic_##insn))))
+
+extern unsigned long _compat_uatomic_set(void *addr,
+                                        unsigned long _new, int len);
+#define compat_uatomic_set(addr, _new)                                        \
+       ((__typeof__(*(addr))) _compat_uatomic_set((addr),                     \
+                                               (unsigned long)(_new),         \
+                                               sizeof(*(addr))))
+
+
+extern unsigned long _compat_uatomic_xchg(void *addr,
+                                         unsigned long _new, int len);
+#define compat_uatomic_xchg(addr, _new)                                               \
+       ((__typeof__(*(addr))) _compat_uatomic_xchg((addr),                    \
+                                               (unsigned long)(_new),         \
+                                               sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
+                                            unsigned long _new, int len);
+#define compat_uatomic_cmpxchg(addr, old, _new)                                       \
+       ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),                 \
+                                               (unsigned long)(old),          \
+                                               (unsigned long)(_new),         \
+                                               sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_and(void *addr,
+                                        unsigned long _new, int len);
+#define compat_uatomic_and(addr, v)                                   \
+       ((__typeof__(*(addr))) _compat_uatomic_and((addr),             \
+                                                  (unsigned long)(v), \
+                                                  sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_or(void *addr,
+                                       unsigned long _new, int len);
+#define compat_uatomic_or(addr, v)                                    \
+       ((__typeof__(*(addr))) _compat_uatomic_or((addr),              \
+                                                 (unsigned long)(v),  \
+                                                 sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+                                               unsigned long _new, int len);
+#define compat_uatomic_add_return(addr, v)                                    \
+       ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),              \
+                                               (unsigned long)(v),            \
+                                               sizeof(*(addr))))
+
+#define compat_uatomic_add(addr, v)                                           \
+               ((void)compat_uatomic_add_return((addr), (v)))
+#define compat_uatomic_inc(addr)                                              \
+               (compat_uatomic_add((addr), 1))
+#define compat_uatomic_dec(addr)                                              \
+               (compat_uatomic_add((addr), -1))
+
+#else
+#define UATOMIC_COMPAT(insn)   (_uatomic_##insn)
+#endif
+
+/* uatomic_read needs no compat wrapper: aligned loads are atomic even in compat mode */
+#define uatomic_set(addr, v)                   \
+               UATOMIC_COMPAT(set(addr, v))
+
+#define uatomic_cmpxchg(addr, old, _new)       \
+               UATOMIC_COMPAT(cmpxchg(addr, old, _new))
+#define uatomic_xchg(addr, v)                  \
+               UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v)           \
+               UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v)            \
+               UATOMIC_COMPAT(or(addr, v))
+#define uatomic_add_return(addr, v)            \
+               UATOMIC_COMPAT(add_return(addr, v))
+
+#define uatomic_add(addr, v)   UATOMIC_COMPAT(add(addr, v))
+#define uatomic_inc(addr)      UATOMIC_COMPAT(inc(addr))
+#define uatomic_dec(addr)      UATOMIC_COMPAT(dec(addr))
+
+#ifdef __cplusplus 
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_X86_H */
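As a quick smoke test of the renamed header, here is a minimal sketch of a consumer (assuming liburcu is built and installed; the counter variable and the printed values are illustrative, not part of the patch):

	#include <stdio.h>
	#include <urcu/uatomic.h>	/* formerly <urcu/uatomic_arch.h> */

	static unsigned long counter;

	int main(void)
	{
		uatomic_set(&counter, 40UL);
		uatomic_inc(&counter);		/* 41 */
		uatomic_add(&counter, 1);	/* 42 */
		/* cmpxchg returns the value observed before the swap */
		if (uatomic_cmpxchg(&counter, 42UL, 0UL) == 42)
			printf("swapped 42 -> 0\n");
		printf("counter = %lu\n", uatomic_read(&counter));
		return 0;
	}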
diff --git a/urcu/uatomic_arch_alpha.h b/urcu/uatomic_arch_alpha.h
deleted file mode 100644 (file)
index 0f795e8..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
-#define _URCU_UATOMIC_ARCH_ALPHA_H
-
-/*
- * Atomic exchange operations for the Alpha architecture. Let GCC do it.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
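Nothing architecture-specific is lost here: Alpha takes its whole uatomic API from the generic header, i.e. from GCC's __sync builtins. A sketch of what a 4-byte uatomic_cmpxchg reduces to on such a port (the function name is illustrative):

	/* roughly what uatomic_cmpxchg() compiles down to on Alpha */
	static unsigned int cas_u32(unsigned int *addr, unsigned int old,
				    unsigned int newv)
	{
		return __sync_val_compare_and_swap(addr, old, newv);
	}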
diff --git a/urcu/uatomic_arch_arm.h b/urcu/uatomic_arch_arm.h
deleted file mode 100644 (file)
index fee3040..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_ARM_H
-#define _URCU_ARCH_UATOMIC_ARM_H
-
-/* 
- * Atomics for ARM.  This approach is usable on kernels back to 2.6.15.
- *
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif 
-
-/* xchg */
-#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
-
-#ifdef __cplusplus 
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_ARM_H */
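ARM defines a single primitive, uatomic_xchg (an acquire-only exchange via __sync_lock_test_and_set), and lets uatomic/generic.h synthesize the rest. That one primitive already suffices for a test-and-set flag; a hedged sketch with illustrative names:

	#include <urcu/uatomic.h>

	static int busy;

	static int try_acquire(void)	/* nonzero if we took the flag */
	{
		return uatomic_xchg(&busy, 1) == 0;
	}

	static void release(void)
	{
		/* note: a real lock release would also need a release barrier */
		uatomic_set(&busy, 0);
	}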
diff --git a/urcu/uatomic_arch_gcc.h b/urcu/uatomic_arch_gcc.h
deleted file mode 100644 (file)
index 4aa32fd..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_GCC_H
-#define _URCU_ARCH_UATOMIC_GCC_H
-
-/* 
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif 
-
-/*
- * If your platform doesn't have a full set of atomics, you will need
- * a separate uatomic_arch_*.h file for your architecture.  Otherwise,
- * just rely on the definitions in uatomic_generic.h.
- */
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus 
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_GCC_H */
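UATOMIC_HAS_ATOMIC_BYTE and UATOMIC_HAS_ATOMIC_SHORT advertise 1- and 2-byte __sync support to the generic header; on ports that leave them undefined, sub-word uatomic calls resolve to _uatomic_link_error(). Portable callers therefore stick to word-sized operands; a sketch (names illustrative):

	#include <urcu/uatomic.h>

	static unsigned long refcount;	/* long-sized: atomic on every port */

	static void ref_get(void)
	{
		uatomic_inc(&refcount);
	}

	static int ref_put(void)	/* nonzero when the last reference drops */
	{
		return uatomic_add_return(&refcount, -1) == 0;
	}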
diff --git a/urcu/uatomic_arch_ppc.h b/urcu/uatomic_arch_ppc.h
deleted file mode 100644 (file)
index bb74934..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_PPC_H
-#define _URCU_ARCH_UATOMIC_PPC_H
-
-/* 
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif 
-
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE  "sync\n"
-#else
-#define LWSYNC_OPCODE  "lwsync\n"
-#endif
-
-#define ILLEGAL_INSTR  ".long  0xd00d00"
-
-/*
- * Use an isync as the second barrier for exchange to provide acquire semantics.
- * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
- * explicit that this also has acquire semantics."
- * Derived from AO_compare_and_swap(), but removed the comparison.
- */
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "stwcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-                       "stdcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
-                                               sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int old_val;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "cmpw %0,%3\n"          /* if load is not equal to */
-                       "bne 2f\n"              /* old, fail */
-                       "stwcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
-               "2:\n"
-                               : "=&r"(old_val)
-                               : "r"(addr), "r"((unsigned int)_new),
-                                 "r"((unsigned int)old)
-                               : "memory", "cc");
-
-               return old_val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old_val;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-                       "cmpd %0,%3\n"          /* if load is not equal to */
-                       "bne 2f\n"              /* old, fail */
-                       "stdcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
-               "2:\n"
-                               : "=&r"(old_val)
-                               : "r"(addr), "r"((unsigned long)_new),
-                                 "r"((unsigned long)old)
-                               : "memory", "cc");
-
-               return old_val;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-       return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new)                                   \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
-                                               (unsigned long)(_new),      \
-                                               sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
-                                int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "add %0,%2,%0\n"        /* add val to value loaded */
-                       "stwcx. %0,0,%1\n"      /* store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-                       "add %0,%2,%0\n"        /* add val to value loaded */
-                       "stdcx. %0,0,%1\n"      /* store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-       return 0;
-}
-
-
-#define uatomic_add_return(addr, v)                                    \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
-                                                 (unsigned long)(v),   \
-                                                 sizeof(*(addr))))
-
-#ifdef __cplusplus 
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
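The lwarx/stwcx. (and ldarx/stdcx.) pairs above are load-linked/store-conditional retry loops: "bne- 1b" restarts the sequence whenever the reservation is lost. Callers use the same retry shape whenever they need a read-modify-write the library does not provide directly; a sketch of an atomic maximum built on uatomic_cmpxchg (names illustrative):

	#include <urcu/uatomic.h>

	static unsigned long highwater;

	static void update_max(unsigned long v)
	{
		unsigned long old = uatomic_read(&highwater);

		while (v > old) {
			unsigned long seen = uatomic_cmpxchg(&highwater, old, v);
			if (seen == old)
				break;		/* our store won the race */
			old = seen;		/* lost the race; recheck */
		}
	}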
diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h
deleted file mode 100644 (file)
index 2a4fa03..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-#ifndef _URCU_UATOMIC_ARCH_S390_H
-#define _URCU_UATOMIC_ARCH_S390_H
-
-/*
- * Atomic exchange operations for the S390 architecture. Based on information
- * taken from the Principles of Operation Appendix A "Conditional Swapping
- * Instructions (CS, CDS)".
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif 
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define COMPILER_HAVE_SHORT_MEM_OPERAND
-#endif
-
-/*
- * MEMOP assembler operand rules:
- * - op refers to the MEMOP_IN operand
- * - MEMOP_IN can expand to more than a single operand. Use it at the end of
- *   the operand list only.
- */
-
-#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
-
-#define MEMOP_OUT(addr)        "=Q" (*(addr))
-#define MEMOP_IN(addr) "Q" (*(addr))
-#define MEMOP_REF(op)  #op             /* op refers to MEMOP_IN operand */
-
-#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-#define MEMOP_OUT(addr)        "=m" (*(addr))
-#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
-#define MEMOP_REF(op)  "0(" #op ")"    /* op refers to MEMOP_IN operand */
-
-#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-struct __uatomic_dummy {
-       unsigned long v[10];
-};
-#define __hp(x)        ((struct __uatomic_dummy *)(x))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int old_val;
-
-               __asm__ __volatile__(
-                       "0:     cs %0,%2," MEMOP_REF(%3) "\n"
-                       "       brc 4,0b\n"
-                       : "=&r" (old_val), MEMOP_OUT (__hp(addr))
-                       : "r" (val), MEMOP_IN (__hp(addr))
-                       : "memory", "cc");
-               return old_val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old_val;
-
-               __asm__ __volatile__(
-                       "0:     csg %0,%2," MEMOP_REF(%3) "\n"
-                       "       brc 4,0b\n"
-                       : "=&r" (old_val), MEMOP_OUT (__hp(addr))
-                       : "r" (val), MEMOP_IN (__hp(addr))
-                       : "memory", "cc");
-               return old_val;
-       }
-#endif
-       default:
-               __asm__ __volatile__(".long     0xd00d00");
-       }
-
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
-                                              sizeof(*(addr)))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                              unsigned long _new, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int old_val = (unsigned int)old;
-
-               __asm__ __volatile__(
-                       "       cs %0,%2," MEMOP_REF(%3) "\n"
-                       : "+r" (old_val), MEMOP_OUT (__hp(addr))
-                       : "r" (_new), MEMOP_IN (__hp(addr))
-                       : "memory", "cc");
-               return old_val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-                       "       csg %0,%2," MEMOP_REF(%3) "\n"
-                       : "+r" (old), MEMOP_OUT (__hp(addr))
-                       : "r" (_new), MEMOP_IN (__hp(addr))
-                       : "memory", "cc");
-               return old;
-       }
-#endif
-       default:
-               __asm__ __volatile__(".long     0xd00d00");
-       }
-
-       return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new)                               \
-       (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                  \
-                                              (unsigned long)(old),    \
-                                              (unsigned long)(_new),   \
-                                              sizeof(*(addr)))
-
-#ifdef __cplusplus 
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_S390_H */
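The s390 exchange above is compare-and-swap in a loop: "brc 4,0b" branches back until cs/csg succeeds, leaving the previous value in %0. The portable rendering of that construction, as a sketch:

	#include <urcu/uatomic.h>

	static unsigned long slot;

	static unsigned long slot_exchange(unsigned long v)
	{
		unsigned long old;

		do {
			old = uatomic_read(&slot);
		} while (uatomic_cmpxchg(&slot, old, v) != old);
		return old;
	}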
diff --git a/urcu/uatomic_arch_sparc64.h b/urcu/uatomic_arch_sparc64.h
deleted file mode 100644 (file)
index 082c847..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
-#define _URCU_ARCH_UATOMIC_SPARC64_H
-
-/* 
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif 
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               __asm__ __volatile__ (
-                       "membar #StoreLoad | #LoadLoad\n\t"
-                        "cas [%1],%2,%0\n\t"
-                        "membar #StoreLoad | #StoreStore\n\t"
-                        : "+&r" (_new)
-                        : "r" (addr), "r" (old)
-                        : "memory");
-
-               return _new;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__ (
-                       "membar #StoreLoad | #LoadLoad\n\t"
-                        "casx [%1],%2,%0\n\t"
-                        "membar #StoreLoad | #StoreStore\n\t"
-                        : "+&r" (_new)
-                        : "r" (addr), "r" (old)
-                        : "memory");
-
-               return _new;
-       }
-#endif
-       }
-       __builtin_trap();
-       return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new)                                   \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
-                                               (unsigned long)(_new),      \
-                                               sizeof(*(addr))))
-
-#ifdef __cplusplus 
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
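sparc64 supplies only cmpxchg (cas/casx bracketed by membars) and again inherits everything else from the generic header. CAS alone is enough for a lock-free Treiber push; a hedged sketch, with the struct and names illustrative (pop is omitted, since it needs ABA protection such as RCU):

	#include <urcu/uatomic.h>

	struct node {
		struct node *next;
	};

	static struct node *top;

	static void push(struct node *n)
	{
		struct node *old;

		do {
			old = uatomic_read(&top);
			n->next = old;
		} while (uatomic_cmpxchg(&top, old, n) != old);
	}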
diff --git a/urcu/uatomic_arch_unknown.h b/urcu/uatomic_arch_unknown.h
deleted file mode 100644 (file)
index 6fb4eb3..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
-#define _URCU_ARCH_UATOMIC_UNKNOWN_H
-
-/* 
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-/* See configure.ac for the list of recognized architectures.  */
-#error "Cannot build: unrecognized architecture detected."
-
-#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
deleted file mode 100644 (file)
index 9fedee6..0000000
+++ /dev/null
@@ -1,596 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_X86_H
-#define _URCU_ARCH_UATOMIC_X86_H
-
-/* 
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-extern "C" {
-#endif 
-
-/*
- * Derived from AO_compare_and_swap() and AO_test_and_set_full().
- */
-
-struct __uatomic_dummy {
-       unsigned long v[10];
-};
-#define __hp(x)        ((struct __uatomic_dummy *)(x))
-
-#define _uatomic_set(addr, v)  CMM_STORE_SHARED(*(addr), (v))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               unsigned char result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgb %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
-                       : "q"((unsigned char)_new)
-                       : "memory");
-               return result;
-       }
-       case 2:
-       {
-               unsigned short result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgw %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
-                       : "r"((unsigned short)_new)
-                       : "memory");
-               return result;
-       }
-       case 4:
-       {
-               unsigned int result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgl %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
-                       : "r"((unsigned int)_new)
-                       : "memory");
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgq %2, %1"
-                       : "+a"(result), "+m"(*__hp(addr))
-                       : "r"((unsigned long)_new)
-                       : "memory");
-               return result;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return 0;
-}
-
-#define _uatomic_cmpxchg(addr, old, _new)                                    \
-       ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
-                                               (unsigned long)(_new),        \
-                                               sizeof(*(addr))))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       /* Note: the "xchg" instruction does not need a "lock" prefix. */
-       switch (len) {
-       case 1:
-       {
-               unsigned char result;
-               __asm__ __volatile__(
-               "xchgb %0, %1"
-                       : "=q"(result), "+m"(*__hp(addr))
-                       : "0" ((unsigned char)val)
-                       : "memory");
-               return result;
-       }
-       case 2:
-       {
-               unsigned short result;
-               __asm__ __volatile__(
-               "xchgw %0, %1"
-                       : "=r"(result), "+m"(*__hp(addr))
-                       : "0" ((unsigned short)val)
-                       : "memory");
-               return result;
-       }
-       case 4:
-       {
-               unsigned int result;
-               __asm__ __volatile__(
-               "xchgl %0, %1"
-                       : "=r"(result), "+m"(*__hp(addr))
-                       : "0" ((unsigned int)val)
-                       : "memory");
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result;
-               __asm__ __volatile__(
-               "xchgq %0, %1"
-                       : "=r"(result), "+m"(*__hp(addr))
-                       : "0" ((unsigned long)val)
-                       : "memory");
-               return result;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return 0;
-}
-
-#define _uatomic_xchg(addr, v)                                               \
-       ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
-                                               sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_add_return(void *addr, unsigned long val,
-                                int len)
-{
-       switch (len) {
-       case 1:
-       {
-               unsigned char result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddb %1, %0"
-                       : "+m"(*__hp(addr)), "+q" (result)
-                       :
-                       : "memory");
-               return result + (unsigned char)val;
-       }
-       case 2:
-       {
-               unsigned short result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddw %1, %0"
-                       : "+m"(*__hp(addr)), "+r" (result)
-                       :
-                       : "memory");
-               return result + (unsigned short)val;
-       }
-       case 4:
-       {
-               unsigned int result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddl %1, %0"
-                       : "+m"(*__hp(addr)), "+r" (result)
-                       :
-                       : "memory");
-               return result + (unsigned int)val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddq %1, %0"
-                       : "+m"(*__hp(addr)), "+r" (result)
-                       :
-                       : "memory");
-               return result + (unsigned long)val;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return 0;
-}
-
-#define _uatomic_add_return(addr, v)                                   \
-       ((__typeof__(*(addr))) __uatomic_add_return((addr),             \
-                                                 (unsigned long)(v),   \
-                                                 sizeof(*(addr))))
-
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void __uatomic_and(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; andb %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "iq" ((unsigned char)val)
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; andw %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "ir" ((unsigned short)val)
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; andl %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "ir" ((unsigned int)val)
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; andq %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "er" ((unsigned long)val)
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_and(addr, v)                                             \
-       (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
-
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void __uatomic_or(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; orb %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "iq" ((unsigned char)val)
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; orw %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "ir" ((unsigned short)val)
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; orl %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "ir" ((unsigned int)val)
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; orq %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "er" ((unsigned long)val)
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_or(addr, v)                                              \
-       (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
-
-/* uatomic_add */
-
-static inline __attribute__((always_inline))
-void __uatomic_add(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; addb %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "iq" ((unsigned char)val)
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; addw %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "ir" ((unsigned short)val)
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; addl %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "ir" ((unsigned int)val)
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; addq %1, %0"
-                       : "=m"(*__hp(addr))
-                       : "er" ((unsigned long)val)
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_add(addr, v)                                             \
-       (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-
-
-/* uatomic_inc */
-
-static inline __attribute__((always_inline))
-void __uatomic_inc(void *addr, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; incb %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; incw %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; incl %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; incq %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_inc(addr)     (__uatomic_inc((addr), sizeof(*(addr))))
-
-/* uatomic_dec */
-
-static inline __attribute__((always_inline))
-void __uatomic_dec(void *addr, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; decb %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; decw %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; decl %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; decq %0"
-                       : "=m"(*__hp(addr))
-                       :
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_dec(addr)     (__uatomic_dec((addr), sizeof(*(addr))))
-
-#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
-extern int __rcu_cas_avail;
-extern int __rcu_cas_init(void);
-
-#define UATOMIC_COMPAT(insn)                                                   \
-       ((likely(__rcu_cas_avail > 0))                                          \
-       ? (_uatomic_##insn)                                                     \
-               : ((unlikely(__rcu_cas_avail < 0)                               \
-                       ? ((__rcu_cas_init() > 0)                               \
-                               ? (_uatomic_##insn)                             \
-                               : (compat_uatomic_##insn))                      \
-                       : (compat_uatomic_##insn))))
-
-extern unsigned long _compat_uatomic_set(void *addr,
-                                        unsigned long _new, int len);
-#define compat_uatomic_set(addr, _new)                                        \
-       ((__typeof__(*(addr))) _compat_uatomic_set((addr),                     \
-                                               (unsigned long)(_new),         \
-                                               sizeof(*(addr))))
-
-
-extern unsigned long _compat_uatomic_xchg(void *addr,
-                                         unsigned long _new, int len);
-#define compat_uatomic_xchg(addr, _new)                                               \
-       ((__typeof__(*(addr))) _compat_uatomic_xchg((addr),                    \
-                                               (unsigned long)(_new),         \
-                                               sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
-                                            unsigned long _new, int len);
-#define compat_uatomic_cmpxchg(addr, old, _new)                                       \
-       ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),                 \
-                                               (unsigned long)(old),          \
-                                               (unsigned long)(_new),         \
-                                               sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_and(void *addr,
-                                        unsigned long _new, int len);
-#define compat_uatomic_and(addr, v)                                   \
-       ((__typeof__(*(addr))) _compat_uatomic_and((addr),             \
-                                                  (unsigned long)(v), \
-                                                  sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_or(void *addr,
-                                       unsigned long _new, int len);
-#define compat_uatomic_or(addr, v)                                    \
-       ((__typeof__(*(addr))) _compat_uatomic_or((addr),              \
-                                                 (unsigned long)(v),  \
-                                                 sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_add_return(void *addr,
-                                               unsigned long _new, int len);
-#define compat_uatomic_add_return(addr, v)                                    \
-       ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),              \
-                                               (unsigned long)(v),            \
-                                               sizeof(*(addr))))
-
-#define compat_uatomic_add(addr, v)                                           \
-               ((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_inc(addr)                                              \
-               (compat_uatomic_add((addr), 1))
-#define compat_uatomic_dec(addr)                                              \
-               (compat_uatomic_add((addr), -1))
-
-#else
-#define UATOMIC_COMPAT(insn)   (_uatomic_##insn)
-#endif
-
-/* Read is atomic even in compat mode */
-#define uatomic_set(addr, v)                   \
-               UATOMIC_COMPAT(set(addr, v))
-
-#define uatomic_cmpxchg(addr, old, _new)       \
-               UATOMIC_COMPAT(cmpxchg(addr, old, _new))
-#define uatomic_xchg(addr, v)                  \
-               UATOMIC_COMPAT(xchg(addr, v))
-#define uatomic_and(addr, v)           \
-               UATOMIC_COMPAT(and(addr, v))
-#define uatomic_or(addr, v)            \
-               UATOMIC_COMPAT(or(addr, v))
-#define uatomic_add_return(addr, v)            \
-               UATOMIC_COMPAT(add_return(addr, v))
-
-#define uatomic_add(addr, v)   UATOMIC_COMPAT(add(addr, v))
-#define uatomic_inc(addr)      UATOMIC_COMPAT(inc(addr))
-#define uatomic_dec(addr)      UATOMIC_COMPAT(dec(addr))
-
-#ifdef __cplusplus 
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_X86_H */
diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h
deleted file mode 100644 (file)
index 337fe40..0000000
+++ /dev/null
@@ -1,557 +0,0 @@
-#ifndef _URCU_UATOMIC_GENERIC_H
-#define _URCU_UATOMIC_GENERIC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef uatomic_set
-#define uatomic_set(addr, v)   CMM_STORE_SHARED(*(addr), (v))
-#endif
-
-#ifndef uatomic_read
-#define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
-#endif
-
-#if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
-static inline __attribute__((always_inline))
-void _uatomic_link_error()
-{
-#ifdef ILLEGAL_INSTR
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-#else
-       __builtin_trap ();
-#endif
-}
-
-#else /* #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
-extern void _uatomic_link_error ();
-#endif /* #else #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
-
-/* cmpxchg */
-
-#ifndef uatomic_cmpxchg
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               return __sync_val_compare_and_swap_1(addr, old, _new);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               return __sync_val_compare_and_swap_2(addr, old, _new);
-#endif
-       case 4:
-               return __sync_val_compare_and_swap_4(addr, old, _new);
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               return __sync_val_compare_and_swap_8(addr, old, _new);
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new)                                   \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
-                                               (unsigned long)(_new),      \
-                                               sizeof(*(addr))))
-
-
-/* uatomic_and */
-
-#ifndef uatomic_and
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val,
-                 int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               __sync_and_and_fetch_1(addr, val);
-               return;
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               __sync_and_and_fetch_2(addr, val);
-               return;
-#endif
-       case 4:
-               __sync_and_and_fetch_4(addr, val);
-               return;
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               __sync_and_and_fetch_8(addr, val);
-               return;
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_and(addr, v)                   \
-       (_uatomic_and((addr),                   \
-                     (unsigned long)(v),       \
-                     sizeof(*(addr))))
-#endif
-
-/* uatomic_or */
-
-#ifndef uatomic_or
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val,
-                int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               __sync_or_and_fetch_1(addr, val);
-               return;
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               __sync_or_and_fetch_2(addr, val);
-               return;
-#endif
-       case 4:
-               __sync_or_and_fetch_4(addr, val);
-               return;
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               __sync_or_and_fetch_8(addr, val);
-               return;
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_or(addr, v)                    \
-       (_uatomic_or((addr),                    \
-                    (unsigned long)(v),        \
-                    sizeof(*(addr))))
-#endif
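A short sketch of these in use for lock-free flag updates (the FLAG_* bits are hypothetical). Note that uatomic_and()/uatomic_or() return void; a caller that needs the resulting value must use uatomic_cmpxchg() or uatomic_add_return() instead.

	#define FLAG_READY	(1UL << 0)
	#define FLAG_DRAINING	(1UL << 1)

	static unsigned long state;

	void mark_ready(void)	{ uatomic_or(&state, FLAG_READY); }
	void clear_ready(void)	{ uatomic_and(&state, ~FLAG_READY); }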
-
-/* uatomic_add_return */
-
-#ifndef uatomic_add_return
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
-                                int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               return __sync_add_and_fetch_1(addr, val);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               return __sync_add_and_fetch_2(addr, val);
-#endif
-       case 4:
-               return __sync_add_and_fetch_4(addr, val);
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               return __sync_add_and_fetch_8(addr, val);
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-
-#define uatomic_add_return(addr, v)                                    \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
-                                                 (unsigned long)(v),   \
-                                                 sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
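uatomic_add_return() returns the post-operation value, which is exactly what reference counting needs. A sketch (obj_get()/obj_put() are hypothetical; the urcu_ref helpers touched later in this patch build on the same primitives):

	static long refcount = 1;

	void obj_get(void)
	{
		uatomic_add(&refcount, 1);
	}

	int obj_put(void)	/* non-zero: caller dropped the last reference */
	{
		return uatomic_add_return(&refcount, -1) == 0;
	}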
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               unsigned char old;
-
-               do {
-                       old = uatomic_read((unsigned char *)addr);
-               } while (!__sync_bool_compare_and_swap_1(addr, old, val));
-
-               return old;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               unsigned short old;
-
-               do {
-                       old = uatomic_read((unsigned short *)addr);
-               } while (!__sync_bool_compare_and_swap_2(addr, old, val));
-
-               return old;
-       }
-#endif
-       case 4:
-       {
-               unsigned int old;
-
-               do {
-                       old = uatomic_read((unsigned int *)addr);
-               } while (!__sync_bool_compare_and_swap_4(addr, old, val));
-
-               return old;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old;
-
-               do {
-                       old = uatomic_read((unsigned long *)addr);
-               } while (!__sync_bool_compare_and_swap_8(addr, old, val));
-
-               return old;
-       }
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
-                                               sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
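Because this exchange is built on __sync_bool_compare_and_swap(), it implies a full barrier, which is enough for a toy test-and-set lock. A sketch, assuming the caa_cpu_relax() and cmm_smp_mb() helpers from urcu/arch.h:

	#include <urcu/arch.h>
	#include <urcu/uatomic.h>

	static int lock_word;

	void toy_lock(void)
	{
		while (uatomic_xchg(&lock_word, 1))	/* acquire: full barrier */
			caa_cpu_relax();
	}

	void toy_unlock(void)
	{
		cmm_smp_mb();			/* order critical section before release */
		uatomic_set(&lock_word, 0);	/* release the lock word */
	}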
-
-#else /* #ifndef uatomic_cmpxchg */
-
-#ifndef uatomic_and
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               unsigned char old, oldt;
-
-               oldt = uatomic_read((unsigned char *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               unsigned short old, oldt;
-
-               oldt = uatomic_read((unsigned short *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       case 4:
-       {
-               unsigned int old, oldt;
-
-               oldt = uatomic_read((unsigned int *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
-               } while (oldt != old);
-
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old, oldt;
-
-               oldt = uatomic_read((unsigned long *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_and(addr, v)           \
-       (_uatomic_and((addr),           \
-                   (unsigned long)(v), \
-                   sizeof(*(addr))))
-#endif /* #ifndef uatomic_and */
-
-#ifndef uatomic_or
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               unsigned char old, oldt;
-
-               oldt = uatomic_read((unsigned char *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               unsigned short old, oldt;
-
-               oldt = uatomic_read((unsigned short *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       case 4:
-       {
-               unsigned int old, oldt;
-
-               oldt = uatomic_read((unsigned int *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
-               } while (oldt != old);
-
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old, oldt;
-
-               oldt = uatomic_read((unsigned long *)addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_or(addr, v)            \
-       (_uatomic_or((addr),            \
-                   (unsigned long)(v), \
-                   sizeof(*(addr))))
-#endif /* #ifndef uatomic_or */
-
-#ifndef uatomic_add_return
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               unsigned char old, oldt;
-
-               oldt = uatomic_read((unsigned char *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned char *)addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               unsigned short old, oldt;
-
-               oldt = uatomic_read((unsigned short *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned short *)addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#endif
-       case 4:
-       {
-               unsigned int old, oldt;
-
-               oldt = uatomic_read((unsigned int *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned int *)addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old, oldt;
-
-               oldt = uatomic_read((unsigned long *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned long *)addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-#define uatomic_add_return(addr, v)                                    \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
-                                                 (unsigned long)(v),   \
-                                                 sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               unsigned char old, oldt;
-
-               oldt = uatomic_read((unsigned char *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned char *)addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               unsigned short old, oldt;
-
-               oldt = uatomic_read((unsigned short *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned short *)addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#endif
-       case 4:
-       {
-               unsigned int old, oldt;
-
-               oldt = uatomic_read((unsigned int *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned int *)addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old, oldt;
-
-               oldt = uatomic_read((unsigned long *)addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((unsigned long *)addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
-                                               sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#endif /* #else #ifndef uatomic_cmpxchg */
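Both halves of the #ifndef uatomic_cmpxchg split expose the same API: the first maps each operation directly onto a gcc __sync builtin, while the second synthesizes everything from the architecture's cmpxchg using the compare-and-swap retry loop seen throughout above. The loop's general shape, written out once as a sketch (update_u32() and op are hypothetical):

	static unsigned int update_u32(unsigned int *addr,
				       unsigned int (*op)(unsigned int))
	{
		unsigned int old, oldt;

		oldt = uatomic_read(addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg(addr, old, op(old));
		} while (oldt != old);	/* lost a race: retry with fresh value */
		return old;		/* value the update was applied to */
	}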
-
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#ifndef uatomic_add
-#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
-#endif
-
-#define uatomic_sub_return(addr, v)    uatomic_add_return((addr), -(v))
-#define uatomic_sub(addr, v)           uatomic_add((addr), -(v))
-
-#ifndef uatomic_inc
-#define uatomic_inc(addr)              uatomic_add((addr), 1)
-#endif
-
-#ifndef uatomic_dec
-#define uatomic_dec(addr)              uatomic_add((addr), -1)
-#endif
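These wrappers just negate or fix the operand; note that, unlike the others, uatomic_sub_return() and uatomic_sub() carry no #ifndef guard, since they always reduce to the add forms. A trivial sketch (the in-flight counter is hypothetical):

	static unsigned long inflight;

	void request_begin(void)	{ uatomic_inc(&inflight); }
	void request_end(void)		{ uatomic_dec(&inflight); }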
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_UATOMIC_GENERIC_H */
index 75620d14f576b30f38bcc4836d4c2b7e88a56594..a422a9906fd787d1482210a5a61b6f3611b7f99c 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 #include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
 
 struct urcu_ref {
        long refcount; /* ATOMIC */