sequence of operations atomically: check if `addr` contains `old`.
If true, then replace the content of `addr` by `new`. Return the
value previously contained by `addr`. This function implies a full
-memory barrier before and after the atomic operation.
+memory barrier before and after the atomic operation on success.
+On failure, no memory ordering is guaranteed.
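As a usage sketch (the lock-like variable and function below are hypothetical, not part of the API):

```c
#include <urcu/uatomic.h>

static unsigned long owner;	/* 0 means unclaimed */

/* Attempt to claim the resource for `self`; returns non-zero on success. */
static int try_claim(unsigned long self)
{
	/*
	 * On success the exchange implies a full memory barrier before and
	 * after the operation; on failure no ordering may be assumed.
	 */
	return uatomic_cmpxchg(&owner, 0, self) == 0;
}
```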
```c
urcu/uatomic/alpha.h \
urcu/uatomic_arch.h \
urcu/uatomic/arm.h \
+ urcu/uatomic/builtins.h \
+ urcu/uatomic/builtins-generic.h \
urcu/uatomic/gcc.h \
urcu/uatomic/generic.h \
urcu/uatomic.h \
+ __GNUC_PATCHLEVEL__)
#endif
+#ifdef __cplusplus
+#define caa_unqual_scalar_typeof(x) \
+ std::remove_cv<std::remove_reference<decltype(x)>::type>::type
+#else
+#define caa_scalar_type_to_expr(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+/*
+ * Use C11 _Generic to express the unqualified type of an expression. This
+ * removes the volatile qualifier from the expression type.
+ */
+#define caa_unqual_scalar_typeof(x) \
+ __typeof__( \
+ _Generic((x), \
+ char: (char)0, \
+ caa_scalar_type_to_expr(char), \
+ caa_scalar_type_to_expr(short), \
+ caa_scalar_type_to_expr(int), \
+ caa_scalar_type_to_expr(long), \
+ caa_scalar_type_to_expr(long long), \
+ default: (x) \
+ ) \
+ )
+#endif
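For illustration, a minimal sketch of what the macro enables, assuming C11 `_Generic` support: a temporary declared through it does not inherit `volatile` from the source expression.

```c
#include <urcu/compiler.h>

static volatile int counter;

static void snapshot(void)
{
	/* `tmp` has type `int`: the volatile qualifier is stripped. */
	caa_unqual_scalar_typeof(counter) tmp = counter;

	(void) tmp;
}
```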
+
+/*
+ * Allow the user to define CMM_SANITIZE_THREAD manually if their toolchain is
+ * not supported by this check.
+ */
+#ifndef CMM_SANITIZE_THREAD
+# if defined(__GNUC__) && defined(__SANITIZE_THREAD__)
+# define CMM_SANITIZE_THREAD
+# elif defined(__clang__) && defined(__has_feature)
+# if __has_feature(thread_sanitizer)
+# define CMM_SANITIZE_THREAD
+# endif
+# endif
+#endif /* !CMM_SANITIZE_THREAD */
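For instance (a hypothetical build invocation, not taken from this tree):

```c
/*
 * Hypothetical example: a TSan-enabled toolchain that defines neither
 * __SANITIZE_THREAD__ nor __has_feature(thread_sanitizer) can still opt in
 * from the build system:
 *
 *     cc -fsanitize=thread -DCMM_SANITIZE_THREAD -c app.c
 */
```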
+
+/*
+ * Helper to add the volatile qualifier to a pointer.
+ */
+#if defined __cplusplus
+template <typename T>
+volatile T cmm_cast_volatile(T t)
+{
+ return static_cast<volatile T>(t);
+}
+#else
+# define cmm_cast_volatile(ptr) \
+ __extension__ \
+ ({ \
+ (volatile __typeof__(ptr))(ptr); \
+ })
+#endif
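A usage sketch mirroring how the `CMM_LOAD_SHARED()` definition below employs this helper (the variable is illustrative):

```c
#include <urcu/compiler.h>

static int ready;

static int peek_ready(void)
{
	/* Qualify the pointer as volatile before handing it to the builtin. */
	return __atomic_load_n(cmm_cast_volatile(&ready), __ATOMIC_RELAXED);
}
```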
+
#endif /* _URCU_COMPILER_H */
* -Wincompatible-pointer-types errors. Using the statement expression
* makes it an rvalue and gets rid of the const-ness.
*/
-#ifdef __URCU_DEREFERENCE_USE_ATOMIC_CONSUME
-# define _rcu_dereference(p) __extension__ ({ \
- __typeof__(__extension__ ({ \
- __typeof__(p) __attribute__((unused)) _________p0 = { 0 }; \
- _________p0; \
- })) _________p1; \
- __atomic_load(&(p), &_________p1, __ATOMIC_CONSUME); \
- (_________p1); \
- })
-#else
-# define _rcu_dereference(p) __extension__ ({ \
- __typeof__(p) _________p1 = CMM_LOAD_SHARED(p); \
- cmm_smp_read_barrier_depends(); \
- (_________p1); \
- })
-#endif
-
+# define _rcu_dereference(p) \
+ uatomic_load(&(p), CMM_CONSUME)
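For context, a reader-side sketch using the public wrapper built on this macro (standard liburcu usage; the struct and global are hypothetical):

```c
#include <urcu.h>		/* legacy liburcu API */

struct cfg {
	int value;
};

static struct cfg *current_cfg;

/* The calling thread is assumed to be registered with rcu_register_thread(). */
static int read_value(void)
{
	struct cfg *c;
	int v;

	rcu_read_lock();
	/* With this change, rcu_dereference() is a CMM_CONSUME load. */
	c = rcu_dereference(current_cfg);
	v = c ? c->value : -1;
	rcu_read_unlock();

	return v;
}
```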
/**
* _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
* is as expected by "old". If succeeds, returns the previous pointer to the
* data structure, which can be safely freed after waiting for a quiescent state
* using synchronize_rcu(). If fails (unexpected value), returns old (which
* should not be freed !).
*
- * uatomic_cmpxchg() acts as both release and acquire barriers.
+ * uatomic_cmpxchg() acts as both release and acquire barriers on success.
*
* This macro is less than 10 lines long. The intent is that this macro
* meets the 10-line criterion in LGPL, allowing this function to be
({ \
__typeof__(*p) _________pold = (old); \
__typeof__(*p) _________pnew = (_new); \
- uatomic_cmpxchg(p, _________pold, _________pnew); \
- })
+ uatomic_cmpxchg_mo(p, _________pold, _________pnew, \
+ CMM_SEQ_CST, CMM_RELAXED); \
+	})
/**
* _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
__extension__ \
({ \
__typeof__(*p) _________pv = (v); \
- uatomic_xchg(p, _________pv); \
+ uatomic_xchg_mo(p, _________pv, \
+ CMM_SEQ_CST); \
})
-#define _rcu_set_pointer(p, v) \
- do { \
- __typeof__(*p) _________pv = (v); \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- cmm_wmb(); \
- uatomic_set(p, _________pv); \
+#define _rcu_set_pointer(p, v) \
+ do { \
+ __typeof__(*p) _________pv = (v); \
+ uatomic_store(p, _________pv, \
+ __builtin_constant_p(v) && (v) == NULL ? \
+ CMM_RELAXED : CMM_RELEASE); \
} while (0)
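A small sketch of what this distinction buys callers of the public wrappers, assuming the `_LGPL_SOURCE` inline path (the struct and global are hypothetical):

```c
#define _LGPL_SOURCE	/* use the inline implementations shown above */
#include <urcu.h>

struct cfg {
	int value;
};

static struct cfg *current_cfg;

static void publish(struct cfg *newcfg)
{
	/* Publishing a real pointer: a CMM_RELEASE store. */
	rcu_assign_pointer(current_cfg, newcfg);
}

static void clear(void)
{
	/* A constant NULL publishes nothing, so a CMM_RELAXED store suffices. */
	rcu_assign_pointer(current_cfg, NULL);
}
```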
/**
* System definitions.
*/
+#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
+#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS
+
+#define CMM_LOAD_SHARED(x) \
+ __atomic_load_n(cmm_cast_volatile(&(x)), __ATOMIC_RELAXED)
+
+#define _CMM_LOAD_SHARED(x) CMM_LOAD_SHARED(x)
+
+#define CMM_STORE_SHARED(x, v) \
+ __extension__ \
+ ({ \
+ __typeof__(v) _v = (v); \
+ __atomic_store_n(cmm_cast_volatile(&(x)), _v, \
+ __ATOMIC_RELAXED); \
+ _v; \
+ })
+
+#define _CMM_STORE_SHARED(x, v) CMM_STORE_SHARED(x, v)
+
+#else
/*
* Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come
* before the load.
_v = _v; /* Work around clang "unused result" */ \
})
+#endif /* CONFIG_RCU_USE_ATOMIC_BUILTINS */
+
#endif /* _URCU_SYSTEM_H */
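A short sketch of how these accessors are meant to be used (the flag is illustrative):

```c
#include <urcu/system.h>

static int stop_requested;

static void request_stop(void)
{
	/* Relaxed atomic store with the builtins; volatile store otherwise. */
	CMM_STORE_SHARED(stop_requested, 1);
}

static int should_stop(void)
{
	/* Relaxed atomic load; neither accessor implies any ordering. */
	return CMM_LOAD_SHARED(stop_requested);
}
```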
#ifndef _URCU_UATOMIC_H
#define _URCU_UATOMIC_H
+#include <assert.h>
+
#include <urcu/arch.h>
+#include <urcu/config.h>
+
+enum cmm_memorder {
+ CMM_RELAXED = 0,
+ CMM_CONSUME = 1,
+ CMM_ACQUIRE = 2,
+ CMM_RELEASE = 3,
+ CMM_ACQ_REL = 4,
+ CMM_SEQ_CST = 5,
+ CMM_SEQ_CST_FENCE = 6,
+};
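To show how the orders are meant to pair, a release/acquire message-passing sketch using the new explicit-order accessors (the variables are hypothetical). `CMM_SEQ_CST_FENCE` behaves like `CMM_SEQ_CST` but additionally emits a full barrier after the operation; it is what the unsuffixed `uatomic_*()` defaults below use.

```c
#include <urcu/uatomic.h>

static int data;
static int ready;

static void producer(void)
{
	uatomic_store(&data, 42, CMM_RELAXED);
	/* Release: the store to data is visible to an acquiring reader. */
	uatomic_store(&ready, 1, CMM_RELEASE);
}

static int consumer(void)
{
	/* Acquire pairs with the release store above. */
	if (uatomic_load(&ready, CMM_ACQUIRE))
		return uatomic_load(&data, CMM_RELAXED);

	return -1;
}
```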
+
+#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS
+
+/*
+ * Make sure that CMM_SEQ_CST_FENCE is not equivalent to other memory orders.
+ */
+# ifdef static_assert
+static_assert(CMM_RELAXED == __ATOMIC_RELAXED, "");
+static_assert(CMM_CONSUME == __ATOMIC_CONSUME, "");
+static_assert(CMM_ACQUIRE == __ATOMIC_ACQUIRE, "");
+static_assert(CMM_RELEASE == __ATOMIC_RELEASE, "");
+static_assert(CMM_ACQ_REL == __ATOMIC_ACQ_REL, "");
+static_assert(CMM_SEQ_CST == __ATOMIC_SEQ_CST, "");
+# endif
+
+/*
+ * This is not part of the public API. It is used internally to implement the
+ * CMM_SEQ_CST_FENCE memory order.
+ *
+ * NOTE: A switch statement is used here instead of an if statement to avoid
+ * a -Wduplicated-cond warning when the memory order is conditionally determined.
+ */
+static inline void cmm_seq_cst_fence_after_atomic(enum cmm_memorder mo)
+{
+ switch (mo) {
+ case CMM_SEQ_CST_FENCE:
+ cmm_smp_mb();
+ break;
+ default:
+ break;
+ }
+}
+
+#endif
+
+/*
+ * This is not part of the public API. It is used internally to convert from the
+ * CMM memory model to the C11 memory model.
+ */
+static inline int cmm_to_c11(int mo)
+{
+ if (mo == CMM_SEQ_CST_FENCE) {
+ return CMM_SEQ_CST;
+ }
+ return mo;
+}
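A hand-traced sketch of how the extra order lowers under `CONFIG_RCU_USE_ATOMIC_BUILTINS` (the counter is illustrative):

```c
#include <urcu/uatomic.h>

static unsigned long refcount;

static unsigned long get_ref(void)
{
	/*
	 * With the builtins backend this is __atomic_add_fetch(...,
	 * __ATOMIC_SEQ_CST) via cmm_to_c11(), followed by cmm_smp_mb()
	 * from cmm_seq_cst_fence_after_atomic().
	 */
	return uatomic_add_return_mo(&refcount, 1, CMM_SEQ_CST_FENCE);
}
```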
-#if defined(URCU_ARCH_X86)
+#if defined(CONFIG_RCU_USE_ATOMIC_BUILTINS)
+#include <urcu/uatomic/builtins.h>
+#elif defined(URCU_ARCH_X86)
#include <urcu/uatomic/x86.h>
#elif defined(URCU_ARCH_PPC)
#include <urcu/uatomic/ppc.h>
--- /dev/null
+/*
+ * urcu/uatomic/builtins-generic.h
+ *
+ * Copyright (c) 2023 Olivier Dion <odion@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _URCU_UATOMIC_BUILTINS_GENERIC_H
+#define _URCU_UATOMIC_BUILTINS_GENERIC_H
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#define uatomic_store(addr, v, mo) \
+ do { \
+ __atomic_store_n(cmm_cast_volatile(addr), v, \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ } while (0)
+
+#define uatomic_set(addr, v) \
+ do { \
+ uatomic_store(addr, v, CMM_RELAXED); \
+ } while (0)
+
+#define uatomic_load(addr, mo) \
+ __extension__ \
+ ({ \
+ __typeof__(*(addr)) _value = \
+ __atomic_load_n(cmm_cast_volatile(addr), \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ \
+ _value; \
+ })
+
+#define uatomic_read(addr) \
+ uatomic_load(addr, CMM_RELAXED)
+
+#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
+ __extension__ \
+ ({ \
+		__typeof__(*(addr)) _old = (__typeof__(*(addr)))(old);	\
+ \
+ if (__atomic_compare_exchange_n(cmm_cast_volatile(addr), \
+ &_old, new, 0, \
+ cmm_to_c11(mos), \
+ cmm_to_c11(mof))) { \
+ cmm_seq_cst_fence_after_atomic(mos); \
+ } else { \
+ cmm_seq_cst_fence_after_atomic(mof); \
+ } \
+ _old; \
+ })
+
+#define uatomic_cmpxchg(addr, old, new) \
+ uatomic_cmpxchg_mo(addr, old, new, CMM_SEQ_CST_FENCE, CMM_RELAXED)
+
+#define uatomic_xchg_mo(addr, v, mo) \
+ __extension__ \
+ ({ \
+		__typeof__(*(addr)) _old =					\
+ __atomic_exchange_n(cmm_cast_volatile(addr), v, \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ _old; \
+ })
+
+#define uatomic_xchg(addr, v) \
+ uatomic_xchg_mo(addr, v, CMM_SEQ_CST_FENCE)
+
+#define uatomic_add_return_mo(addr, v, mo) \
+ __extension__ \
+ ({ \
+ __typeof__(*(addr)) _old = \
+ __atomic_add_fetch(cmm_cast_volatile(addr), v, \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ _old; \
+ })
+
+#define uatomic_add_return(addr, v) \
+ uatomic_add_return_mo(addr, v, CMM_SEQ_CST_FENCE)
+
+#define uatomic_sub_return_mo(addr, v, mo) \
+ __extension__ \
+ ({ \
+ __typeof__(*(addr)) _old = \
+ __atomic_sub_fetch(cmm_cast_volatile(addr), v, \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ _old; \
+ })
+
+#define uatomic_sub_return(addr, v) \
+ uatomic_sub_return_mo(addr, v, CMM_SEQ_CST_FENCE)
+
+#define uatomic_and_mo(addr, mask, mo) \
+ do { \
+ (void) __atomic_and_fetch(cmm_cast_volatile(addr), mask, \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ } while (0)
+
+#define uatomic_and(addr, mask) \
+ uatomic_and_mo(addr, mask, CMM_SEQ_CST)
+
+#define uatomic_or_mo(addr, mask, mo) \
+ do { \
+ (void) __atomic_or_fetch(cmm_cast_volatile(addr), mask, \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
+ } while (0)
+
+#define uatomic_or(addr, mask) \
+ uatomic_or_mo(addr, mask, CMM_RELAXED)
+
+#define uatomic_add_mo(addr, v, mo) \
+ (void) uatomic_add_return_mo(addr, v, mo)
+
+#define uatomic_add(addr, v) \
+ uatomic_add_mo(addr, v, CMM_RELAXED)
+
+#define uatomic_sub_mo(addr, v, mo) \
+ (void) uatomic_sub_return_mo(addr, v, mo)
+
+#define uatomic_sub(addr, v) \
+ uatomic_sub_mo(addr, v, CMM_RELAXED)
+
+#define uatomic_inc_mo(addr, mo) \
+ uatomic_add_mo(addr, 1, mo)
+
+#define uatomic_inc(addr) \
+ uatomic_inc_mo(addr, CMM_RELAXED)
+
+#define uatomic_dec_mo(addr, mo) \
+ uatomic_sub_mo(addr, 1, mo)
+
+#define uatomic_dec(addr) \
+ uatomic_dec_mo(addr, CMM_RELAXED)
+
+#define cmm_smp_mb__before_uatomic_and() cmm_smp_mb()
+#define cmm_smp_mb__after_uatomic_and() cmm_smp_mb()
+
+#define cmm_smp_mb__before_uatomic_or() cmm_smp_mb()
+#define cmm_smp_mb__after_uatomic_or() cmm_smp_mb()
+
+#define cmm_smp_mb__before_uatomic_add() cmm_smp_mb()
+#define cmm_smp_mb__after_uatomic_add() cmm_smp_mb()
+
+#define cmm_smp_mb__before_uatomic_sub() cmm_smp_mb()
+#define cmm_smp_mb__after_uatomic_sub() cmm_smp_mb()
+
+#define cmm_smp_mb__before_uatomic_inc() cmm_smp_mb()
+#define cmm_smp_mb__after_uatomic_inc() cmm_smp_mb()
+
+#define cmm_smp_mb__before_uatomic_dec() cmm_smp_mb()
+#define cmm_smp_mb__after_uatomic_dec() cmm_smp_mb()
+
+#endif /* _URCU_UATOMIC_BUILTINS_GENERIC_H */
--- /dev/null
+/*
+ * urcu/uatomic/builtins.h
+ *
+ * Copyright (c) 2023 Olivier Dion <odion@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _URCU_UATOMIC_BUILTINS_H
+#define _URCU_UATOMIC_BUILTINS_H
+
+#include <urcu/arch.h>
+
+#if defined(__has_builtin)
+# if !__has_builtin(__atomic_store_n)
+# error "Toolchain does not support __atomic_store_n."
+# endif
+# if !__has_builtin(__atomic_load_n)
+# error "Toolchain does not support __atomic_load_n."
+# endif
+# if !__has_builtin(__atomic_exchange_n)
+# error "Toolchain does not support __atomic_exchange_n."
+# endif
+# if !__has_builtin(__atomic_compare_exchange_n)
+# error "Toolchain does not support __atomic_compare_exchange_n."
+# endif
+# if !__has_builtin(__atomic_add_fetch)
+# error "Toolchain does not support __atomic_add_fetch."
+# endif
+# if !__has_builtin(__atomic_sub_fetch)
+# error "Toolchain does not support __atomic_sub_fetch."
+# endif
+# if !__has_builtin(__atomic_or_fetch)
+# error "Toolchain does not support __atomic_or_fetch."
+# endif
+# if !__has_builtin(__atomic_thread_fence)
+# error "Toolchain does not support __atomic_thread_fence."
+# endif
+# if !__has_builtin(__atomic_signal_fence)
+# error "Toolchain does not support __atomic_signal_fence."
+# endif
+#elif defined(__GNUC__)
+# define GCC_VERSION (__GNUC__ * 10000 + \
+ __GNUC_MINOR__ * 100 + \
+ __GNUC_PATCHLEVEL__)
+# if GCC_VERSION < 40700
+# error "GCC version is too old. Version must be 4.7 or greater"
+# endif
+# undef GCC_VERSION
+#else
+# error "Toolchain is not supported."
+#endif
+
+#if defined(__GNUC__)
+# define UATOMIC_HAS_ATOMIC_BYTE __GCC_ATOMIC_CHAR_LOCK_FREE
+# define UATOMIC_HAS_ATOMIC_SHORT __GCC_ATOMIC_SHORT_LOCK_FREE
+#elif defined(__clang__)
+# define UATOMIC_HAS_ATOMIC_BYTE __CLANG_ATOMIC_CHAR_LOCK_FREE
+# define UATOMIC_HAS_ATOMIC_SHORT __CLANG_ATOMIC_SHORT_LOCK_FREE
+#else
+/* # define UATOMIC_HAS_ATOMIC_BYTE */
+/* # define UATOMIC_HAS_ATOMIC_SHORT */
+#endif
+
+#include <urcu/uatomic/builtins-generic.h>
+
+#endif /* _URCU_UATOMIC_BUILTINS_H */
#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
#endif
+extern void abort(void);
+
+#define uatomic_load_store_return_op(op, addr, v, mo) \
+ __extension__ \
+ ({ \
+ \
+ switch (mo) { \
+ case CMM_ACQUIRE: \
+ case CMM_CONSUME: \
+ case CMM_RELAXED: \
+ break; \
+ case CMM_RELEASE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ \
+		__typeof__(*(addr)) _value = op(addr, v);			\
+ \
+ switch (mo) { \
+ case CMM_CONSUME: \
+ cmm_smp_read_barrier_depends(); \
+ break; \
+ case CMM_ACQUIRE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ case CMM_RELAXED: \
+ case CMM_RELEASE: \
+ break; \
+ default: \
+ abort(); \
+ } \
+ _value; \
+ })
+
+#define uatomic_load_store_op(op, addr, v, mo) \
+ do { \
+ switch (mo) { \
+ case CMM_ACQUIRE: \
+ case CMM_CONSUME: \
+ case CMM_RELAXED: \
+ break; \
+ case CMM_RELEASE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ \
+ op(addr, v); \
+ \
+ switch (mo) { \
+ case CMM_CONSUME: \
+ cmm_smp_read_barrier_depends(); \
+ break; \
+ case CMM_ACQUIRE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ case CMM_RELAXED: \
+ case CMM_RELEASE: \
+ break; \
+ default: \
+ abort(); \
+ } \
+ } while (0)
+
+#define uatomic_store(addr, v, mo) \
+ do { \
+ switch (mo) { \
+ case CMM_RELAXED: \
+ break; \
+ case CMM_RELEASE: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ \
+ uatomic_set(addr, v); \
+ \
+ switch (mo) { \
+ case CMM_RELAXED: \
+ case CMM_RELEASE: \
+ break; \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ } while (0)
+
+#define uatomic_and_mo(addr, v, mo) \
+ uatomic_load_store_op(uatomic_and, addr, v, mo)
+
+#define uatomic_or_mo(addr, v, mo) \
+ uatomic_load_store_op(uatomic_or, addr, v, mo)
+
+#define uatomic_add_mo(addr, v, mo) \
+ uatomic_load_store_op(uatomic_add, addr, v, mo)
+
+#define uatomic_sub_mo(addr, v, mo) \
+ uatomic_load_store_op(uatomic_sub, addr, v, mo)
+
+#define uatomic_inc_mo(addr, mo) \
+ uatomic_load_store_op(uatomic_add, addr, 1, mo)
+
+#define uatomic_dec_mo(addr, mo) \
+ uatomic_load_store_op(uatomic_add, addr, -1, mo)
+/*
+ * NOTE: We cannot simply do switch (_value == (old) ? mos : mof), otherwise
+ * the compiler emits a -Wduplicated-cond warning.
+ */
+#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
+ __extension__ \
+ ({ \
+ switch (mos) { \
+ case CMM_ACQUIRE: \
+ case CMM_CONSUME: \
+ case CMM_RELAXED: \
+ break; \
+ case CMM_RELEASE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ \
+ __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
+ new); \
+ \
+ if (_value == (old)) { \
+ switch (mos) { \
+ case CMM_CONSUME: \
+ cmm_smp_read_barrier_depends(); \
+ break; \
+ case CMM_ACQUIRE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ case CMM_RELAXED: \
+ case CMM_RELEASE: \
+ break; \
+ default: \
+ abort(); \
+ } \
+ } else { \
+ switch (mof) { \
+ case CMM_CONSUME: \
+ cmm_smp_read_barrier_depends(); \
+ break; \
+ case CMM_ACQUIRE: \
+ case CMM_ACQ_REL: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ case CMM_RELAXED: \
+ case CMM_RELEASE: \
+ break; \
+ default: \
+ abort(); \
+ } \
+ } \
+ _value; \
+ })
+
+#define uatomic_xchg_mo(addr, v, mo) \
+ uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
+
+#define uatomic_add_return_mo(addr, v, mo) \
+	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)
+
+#define uatomic_sub_return_mo(addr, v, mo) \
+	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
+
+
#ifndef uatomic_read
#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
#endif
+#define uatomic_load(addr, mo) \
+ __extension__ \
+ ({ \
+ switch (mo) { \
+ case CMM_ACQUIRE: \
+ case CMM_CONSUME: \
+ case CMM_RELAXED: \
+ break; \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ \
+ __typeof__(*(addr)) _rcu_value = uatomic_read(addr); \
+ \
+ switch (mo) { \
+ case CMM_RELAXED: \
+ break; \
+ case CMM_CONSUME: \
+ cmm_smp_read_barrier_depends(); \
+ break; \
+ case CMM_ACQUIRE: \
+ case CMM_SEQ_CST: \
+ case CMM_SEQ_CST_FENCE: \
+ cmm_smp_mb(); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ \
+ _rcu_value; \
+ })
+
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void *rcu_set_pointer_sym(void **p, void *v)
{
- cmm_wmb();
- uatomic_set(p, v);
+ uatomic_store(p, v, CMM_RELEASE);
return v;
}
void *rcu_xchg_pointer_sym(void **p, void *v)
{
- cmm_wmb();
- return uatomic_xchg(p, v);
+ return uatomic_xchg_mo(p, v, CMM_SEQ_CST);
}
void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
- cmm_wmb();
- return uatomic_cmpxchg(p, old, _new);
+ return uatomic_cmpxchg_mo(p, old, _new, CMM_SEQ_CST, CMM_RELAXED);
}
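For completeness, the classic updater pattern these symbols back, shown as a sketch with a hypothetical struct (standard liburcu usage):

```c
#include <stdlib.h>
#include <urcu.h>

struct cfg {
	int value;
};

static struct cfg *current_cfg;

static void update_cfg(struct cfg *newcfg)
{
	struct cfg *old;

	/* Publish the new version; the exchange orders prior initialization. */
	old = rcu_xchg_pointer(&current_cfg, newcfg);

	/* Wait for pre-existing readers before reclaiming the old version. */
	synchronize_rcu();
	free(old);
}
```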