urcu/tls-compat.h \
urcu/uatomic/aarch64.h \
urcu/uatomic/alpha.h \
+ urcu/uatomic/api.h \
urcu/uatomic_arch.h \
urcu/uatomic/arm.h \
urcu/uatomic/builtins.h \
return mo;
}
+#include <urcu/uatomic/api.h>
+
#if defined(CONFIG_RCU_USE_ATOMIC_BUILTINS)
#include <urcu/uatomic/builtins.h>
#elif defined(URCU_ARCH_X86)
--- /dev/null
+#ifndef _URCU_UATOMIC_API_H
+#define _URCU_UATOMIC_API_H
+
+/*
+ * Select the second argument. Use inside macros to implement an optional last
+ * macro argument, such as:
+ *
+ * #define macro(_a, _b, _c, _optional...) \
+ * _uatomic_select_arg1(_, ##_optional, do_default_macro())
+ */
+#define _uatomic_select_arg1(arg0, arg1, ...) arg1
+
+/*
+ * Like _uatomic_select_arg1(), but can be used for selecting a second optional
+ * argument.
+ */
+#define _uatomic_select_arg2(arg0, arg1, arg2, ...) arg2
+
+#define _uatomic_default_mo(dflt, mo...) \
+ _uatomic_select_arg1(_, ##mo, dflt)
+
+#define _uatomic_default_mo2(dflt, mo...) \
+ _uatomic_select_arg2(_, ##mo, dflt, dflt)
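+
+/*
+ * For instance, _uatomic_default_mo(CMM_RELAXED) expands to CMM_RELAXED,
+ * while _uatomic_default_mo(CMM_RELAXED, CMM_ACQUIRE) expands to CMM_ACQUIRE.
+ * This lets callers pass an optional memory order to the wrappers below,
+ * e.g. uatomic_read(&x) for a relaxed load, or uatomic_read(&x, CMM_ACQUIRE)
+ * for an acquire load.
+ */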
+
+#define uatomic_load(addr, mo...) \
+ uatomic_load_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_read(addr, mo...) \
+ uatomic_load_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_store(addr, value, mo...) \
+ uatomic_store_mo(addr, value, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_set(addr, value, mo...) \
+ uatomic_store_mo(addr, value, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_add_return(addr, v, mo...) \
+ uatomic_add_return_mo(addr, v, _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_sub_return(addr, v, mo...) \
+ uatomic_sub_return_mo(addr, v, _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_and(addr, mask, mo...) \
+ uatomic_and_mo(addr, mask, _uatomic_default_mo(CMM_SEQ_CST, ##mo))
+
+#define uatomic_or(addr, mask, mo...) \
+ uatomic_or_mo(addr, mask, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_add(addr, v, mo...) \
+ uatomic_add_mo(addr, v, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_sub(addr, v, mo...) \
+ uatomic_sub_mo(addr, v, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_inc(addr, mo...) \
+ uatomic_inc_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_dec(addr, mo...) \
+ uatomic_dec_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_xchg(addr, value, mo...) \
+ uatomic_xchg_mo(addr, value, \
+ _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_cmpxchg(addr, value, _new, mo...) \
+ uatomic_cmpxchg_mo(addr, value, _new, \
+ _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo), \
+ _uatomic_default_mo2(CMM_RELAXED, ##mo))
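+
+/*
+ * uatomic_cmpxchg() takes up to two optional memory orders: the first applies
+ * on success, the second on failure. When omitted, they default to
+ * CMM_SEQ_CST_FENCE and CMM_RELAXED respectively.
+ */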
+
+#endif /* _URCU_UATOMIC_API_H */
#endif
/* xchg */
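+/*
+ * Emit the barrier needed _before_ an xchg implemented with
+ * __sync_lock_test_and_set(): that builtin is only an acquire barrier, so
+ * release (and stronger) orderings require an explicit full barrier before it.
+ */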
+static inline void _cmm_compat_c11_smp_mb__before_xchg_mo(enum cmm_memorder mo)
+{
+ switch (mo) {
+ case CMM_SEQ_CST_FENCE:
+ case CMM_SEQ_CST:
+ case CMM_ACQ_REL:
+ case CMM_RELEASE:
+ cmm_smp_mb();
+ break;
+ case CMM_ACQUIRE:
+ case CMM_CONSUME:
+ case CMM_RELAXED:
+ break;
+ default:
+ abort();
+ }
+}
/*
* Based on [1], __sync_lock_test_and_set() is not a full barrier, but
*
* [1] https://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
*/
-#define uatomic_xchg(addr, v) \
- ({ \
- cmm_smp_mb(); \
- __sync_lock_test_and_set(addr, v); \
+#define uatomic_xchg_mo(addr, v, mo) \
+ ({ \
+ _cmm_compat_c11_smp_mb__before_xchg_mo(mo); \
+ __sync_lock_test_and_set(addr, v); \
})
#ifdef __cplusplus
#include <urcu/compiler.h>
#include <urcu/system.h>
-#define uatomic_store(addr, v, mo) \
+#define uatomic_store_mo(addr, v, mo) \
do { \
__atomic_store_n(cmm_cast_volatile(addr), v, \
cmm_to_c11(mo)); \
cmm_seq_cst_fence_after_atomic(mo); \
} while (0)
-#define uatomic_set(addr, v) \
- do { \
- uatomic_store(addr, v, CMM_RELAXED); \
- } while (0)
-
-#define uatomic_load(addr, mo) \
+#define uatomic_load_mo(addr, mo) \
__extension__ \
({ \
__typeof__(*(addr)) _value = \
_value; \
})
-#define uatomic_read(addr) \
- uatomic_load(addr, CMM_RELAXED)
-
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
__extension__ \
({ \
_old; \
})
-#define uatomic_cmpxchg(addr, old, new) \
- uatomic_cmpxchg_mo(addr, old, new, CMM_SEQ_CST_FENCE, CMM_RELAXED)
-
#define uatomic_xchg_mo(addr, v, mo) \
__extension__ \
({ \
_old; \
})
-#define uatomic_xchg(addr, v) \
- uatomic_xchg_mo(addr, v, CMM_SEQ_CST_FENCE)
-
#define uatomic_add_return_mo(addr, v, mo) \
__extension__ \
({ \
_old; \
})
-#define uatomic_add_return(addr, v) \
- uatomic_add_return_mo(addr, v, CMM_SEQ_CST_FENCE)
#define uatomic_sub_return_mo(addr, v, mo) \
__extension__ \
_old; \
})
-#define uatomic_sub_return(addr, v) \
- uatomic_sub_return_mo(addr, v, CMM_SEQ_CST_FENCE)
#define uatomic_and_mo(addr, mask, mo) \
do { \
cmm_seq_cst_fence_after_atomic(mo); \
} while (0)
-#define uatomic_and(addr, mask) \
- uatomic_and_mo(addr, mask, CMM_SEQ_CST)
#define uatomic_or_mo(addr, mask, mo) \
do { \
} while (0)
-#define uatomic_or(addr, mask) \
- uatomic_or_mo(addr, mask, CMM_RELAXED)
-
#define uatomic_add_mo(addr, v, mo) \
(void) uatomic_add_return_mo(addr, v, mo)
-#define uatomic_add(addr, v) \
- uatomic_add_mo(addr, v, CMM_RELAXED)
-
#define uatomic_sub_mo(addr, v, mo) \
(void) uatomic_sub_return_mo(addr, v, mo)
-#define uatomic_sub(addr, v) \
- uatomic_sub_mo(addr, v, CMM_RELAXED)
-
#define uatomic_inc_mo(addr, mo) \
uatomic_add_mo(addr, 1, mo)
-#define uatomic_inc(addr) \
- uatomic_inc_mo(addr, CMM_RELAXED)
-
#define uatomic_dec_mo(addr, mo) \
uatomic_sub_mo(addr, 1, mo)
-#define uatomic_dec(addr) \
- uatomic_dec_mo(addr, CMM_RELAXED)
-
#define cmm_smp_mb__before_uatomic_and() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_and() cmm_smp_mb()
extern "C" {
#endif
-#ifndef uatomic_set
-#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
-#endif
-
/*
* Can be defined for the architecture.
*
* What needs to be emitted _before_ the `operation' with memory ordering `mo'.
*/
#ifndef _cmm_compat_c11_smp_mb__before_mo
-# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
-#endif
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
+ do { \
+ switch (mo) { \
+ case CMM_SEQ_CST_FENCE: \
+ case CMM_SEQ_CST: \
+ case CMM_ACQ_REL: \
+ case CMM_RELEASE: \
+ cmm_smp_mb(); \
+ break; \
+ case CMM_ACQUIRE: \
+ case CMM_CONSUME: \
+ case CMM_RELAXED: \
+ break; \
+ default: \
+ abort(); \
+ break; \
+ \
+ } \
+	} while (0)
+
+#endif /* _cmm_compat_c11_smp_mb__before_mo */
/*
* Can be defined for the architecture.
* What needs to be emitted _after_ the `operation' with memory ordering `mo'.
*/
#ifndef _cmm_compat_c11_smp_mb__after_mo
-# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
-#endif
-
-#define uatomic_load_store_return_op(op, addr, v, mo) \
- __extension__ \
- ({ \
- _cmm_compat_c11_smp_mb__before_mo(op, mo); \
- __typeof__((*addr)) _value = op(addr, v); \
- _cmm_compat_c11_smp_mb__after_mo(op, mo); \
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \
+ do { \
+ switch (mo) { \
+ case CMM_SEQ_CST_FENCE: \
+ case CMM_SEQ_CST: \
+ case CMM_ACQUIRE: \
+ case CMM_CONSUME: \
+ case CMM_ACQ_REL: \
+ cmm_smp_mb(); \
+ break; \
+ case CMM_RELEASE: \
+ case CMM_RELAXED: \
+ break; \
+ default: \
+ abort(); \
+ break; \
\
- _value; \
- })
+ } \
+	} while (0)
+#endif /* _cmm_compat_c11_smp_mb__after_mo */
-#define uatomic_load_store_op(op, addr, v, mo) \
-	do { \
-		_cmm_compat_c11_smp_mb__before_mo(op, mo);	\
-		op(addr, v);					\
-		_cmm_compat_c11_smp_mb__after_mo(op, mo);	\
-	} while (0)
-#define uatomic_store(addr, v, mo) \
+#define uatomic_store_mo(addr, v, mo) \
do { \
_cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \
uatomic_set(addr, v); \
_cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \
} while (0)
-#define uatomic_and_mo(addr, v, mo) \
- uatomic_load_store_op(uatomic_and, addr, v, mo)
-
-#define uatomic_or_mo(addr, v, mo) \
- uatomic_load_store_op(uatomic_or, addr, v, mo)
-
-#define uatomic_add_mo(addr, v, mo) \
- uatomic_load_store_op(uatomic_add, addr, v, mo)
-
-#define uatomic_sub_mo(addr, v, mo) \
- uatomic_load_store_op(uatomic_sub, addr, v, mo)
-
-#define uatomic_inc_mo(addr, mo) \
- uatomic_load_store_op(uatomic_add, addr, 1, mo)
-
-#define uatomic_dec_mo(addr, mo) \
- uatomic_load_store_op(uatomic_add, addr, -1, mo)
/*
- * NOTE: We can not just do switch (_value == (old) ? mos : mof) otherwise the
- * compiler emit a -Wduplicated-cond warning.
*/
-#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
__extension__ \
({ \
- _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
- __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
- new); \
+ __typeof__(*(addr)) _value = \
+ __atomic_load_n(cmm_cast_volatile(addr), \
+ cmm_to_c11(mo)); \
+ cmm_seq_cst_fence_after_atomic(mo); \
\
- if (_value == (old)) { \
- _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
- } else { \
- _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
- } \
_value; \
})
-#define uatomic_xchg_mo(addr, v, mo) \
- uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
-
-#define uatomic_add_return_mo(addr, v, mo) \
- uatomic_load_store_return_op(uatomic_add_return, addr, v)
-#define uatomic_sub_return_mo(addr, v, mo) \
- uatomic_load_store_return_op(uatomic_sub_return, addr, v)
-
-#ifndef uatomic_read
-#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
-#endif
-
-#define uatomic_load(addr, mo) \
+#define uatomic_load_mo(addr, mo) \
__extension__ \
({ \
_cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \
}
#endif
+
+/*
+ * NOTE: All RMW operations are implemented using the `__sync' builtins, which
+ * are all documented as being "full barriers". Therefore, nothing needs to be
+ * emitted for RMW operations, whatever the requested memory order.
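+ *
+ * For instance, the `__sync' builtin behind uatomic_add_return_mo() is itself
+ * a full barrier, so no additional fence is emitted for any `mo'.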
+ */
+
#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
-/* cmpxchg */
+/* uatomic_cmpxchg_mo */
-#ifndef uatomic_cmpxchg
+#ifndef uatomic_cmpxchg_mo
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
unsigned long _new, int len)
return 0;
}
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
caa_cast_long_keep_sign(old), \
- caa_cast_long_keep_sign(_new),\
+ caa_cast_long_keep_sign(_new), \
sizeof(*(addr))))
+/* uatomic_and_mo */
-
-/* uatomic_and */
-
-#ifndef uatomic_and
+#ifndef uatomic_and_mo
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
int len)
_uatomic_link_error();
}
-#define uatomic_and(addr, v) \
+#define uatomic_and_mo(addr, v, mo) \
(_uatomic_and((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
#endif
-/* uatomic_or */
+/* uatomic_or_mo */
-#ifndef uatomic_or
+#ifndef uatomic_or_mo
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
int len)
return;
}
-#define uatomic_or(addr, v) \
+#define uatomic_or_mo(addr, v, mo) \
(_uatomic_or((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
#endif
-/* uatomic_add_return */
+/* uatomic_add_return_mo */
-#ifndef uatomic_add_return
+#ifndef uatomic_add_return_mo
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
int len)
}
-#define uatomic_add_return(addr, v) \
+#define uatomic_add_return_mo(addr, v, mo) \
((__typeof__(*(addr))) _uatomic_add_return((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */
-#ifndef uatomic_xchg
+#ifndef uatomic_xchg_mo
/* xchg */
static inline __attribute__((always_inline))
return 0;
}
-#define uatomic_xchg(addr, v) \
+#define uatomic_xchg_mo(addr, v, mo) \
((__typeof__(*(addr))) _uatomic_exchange((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
+#endif /* #ifndef uatomic_xchg_mo */
-#else /* #ifndef uatomic_cmpxchg */
+#else /* #ifndef uatomic_cmpxchg_mo */
-#ifndef uatomic_and
-/* uatomic_and */
+#ifndef uatomic_and_mo
+/* uatomic_and_mo */
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
_uatomic_link_error();
}
-#define uatomic_and(addr, v) \
+#define uatomic_and_mo(addr, v, mo) \
(_uatomic_and((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and() cmm_barrier()
#define cmm_smp_mb__after_uatomic_and() cmm_barrier()
-#endif /* #ifndef uatomic_and */
+#endif /* #ifndef uatomic_and_mo */
-#ifndef uatomic_or
-/* uatomic_or */
+#ifndef uatomic_or_mo
+/* uatomic_or_mo */
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
_uatomic_link_error();
}
-#define uatomic_or(addr, v) \
+#define uatomic_or_mo(addr, v, mo) \
(_uatomic_or((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or() cmm_barrier()
#define cmm_smp_mb__after_uatomic_or() cmm_barrier()
-#endif /* #ifndef uatomic_or */
+#endif /* #ifndef uatomic_or_mo */
-#ifndef uatomic_add_return
-/* uatomic_add_return */
+#ifndef uatomic_add_return_mo
+/* uatomic_add_return_mo */
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
return 0;
}
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+#define uatomic_add_return_mo(addr, v, mo) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
+#endif /* #ifndef uatomic_add_return_mo */
-#ifndef uatomic_xchg
-/* xchg */
+#ifndef uatomic_xchg_mo
+/* uatomic_xchg_mo */
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
return 0;
}
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), \
+#define uatomic_xchg_mo(addr, v, mo) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
+#endif /* #ifndef uatomic_xchg_mo */
-#endif /* #else #ifndef uatomic_cmpxchg */
+#endif /* #else #ifndef uatomic_cmpxchg_mo */
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+/* uatomic_sub_return_mo, uatomic_add_mo, uatomic_sub_mo, uatomic_inc_mo, uatomic_dec_mo */
-#ifndef uatomic_add
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
+#ifndef uatomic_add_mo
+#define uatomic_add_mo(addr, v, mo) (void)uatomic_add_return_mo((addr), (v), mo)
#define cmm_smp_mb__before_uatomic_add() cmm_barrier()
#define cmm_smp_mb__after_uatomic_add() cmm_barrier()
#endif
-#define uatomic_sub_return(addr, v) \
- uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
-#define uatomic_sub(addr, v) \
- uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
+#define uatomic_sub_return_mo(addr, v, mo) \
+ uatomic_add_return_mo((addr), -(caa_cast_long_keep_sign(v)), mo)
+#define uatomic_sub_mo(addr, v, mo) \
+ uatomic_add_mo((addr), -(caa_cast_long_keep_sign(v)), mo)
#define cmm_smp_mb__before_uatomic_sub() cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub() cmm_smp_mb__after_uatomic_add()
-#ifndef uatomic_inc
-#define uatomic_inc(addr) uatomic_add((addr), 1)
+#ifndef uatomic_inc_mo
+#define uatomic_inc_mo(addr, mo) uatomic_add_mo((addr), 1, mo)
#define cmm_smp_mb__before_uatomic_inc() cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc() cmm_smp_mb__after_uatomic_add()
#endif
-#ifndef uatomic_dec
-#define uatomic_dec(addr) uatomic_add((addr), -1)
+#ifndef uatomic_dec_mo
+#define uatomic_dec_mo(addr, mo)	uatomic_add_mo((addr), -1, mo)
#define cmm_smp_mb__before_uatomic_dec() cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec() cmm_smp_mb__after_uatomic_add()
#endif
return 0;
}
-#define uatomic_xchg(addr, v) \
+#define uatomic_xchg_mo(addr, v, mo) \
((__typeof__(*(addr))) _uatomic_exchange((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
}
-#define uatomic_cmpxchg(addr, old, _new) \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \
((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
caa_cast_long_keep_sign(old), \
caa_cast_long_keep_sign(_new),\
}
-#define uatomic_add_return(addr, v) \
+#define uatomic_add_return_mo(addr, v, mo) \
((__typeof__(*(addr))) _uatomic_add_return((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr))))
return 0;
}
-#define uatomic_xchg(addr, v) \
+#define uatomic_xchg_mo(addr, v, mo) \
(__typeof__(*(addr))) _uatomic_exchange((addr), \
caa_cast_long_keep_sign(v), \
sizeof(*(addr)))
return 0;
}
-#define uatomic_cmpxchg(addr, old, _new) \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \
(__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
caa_cast_long_keep_sign(old), \
caa_cast_long_keep_sign(_new),\
}
-#define uatomic_cmpxchg(addr, old, _new) \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \
((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
caa_cast_long_keep_sign(old), \
caa_cast_long_keep_sign(_new), \
#define __hp(size, x) ((__hp_##size *)(x))
-#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
-
/* cmpxchg */
static inline __attribute__((always_inline))
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif
-/* Read is atomic even in compat mode */
-#define uatomic_set(addr, v) \
- UATOMIC_COMPAT(set(addr, v))
+/*
+ * All RMW operations have an implicit lock prefix. The memory ordering
+ * arguments are therefore ignored for these operations: every ordering is
+ * already respected without emitting any additional memory barrier.
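+ *
+ * For instance, the uatomic_cmpxchg_mo() definition below simply ignores its
+ * `mos' and `mof' arguments.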
+ */
-#define uatomic_cmpxchg(addr, old, _new) \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \
UATOMIC_COMPAT(cmpxchg(addr, old, _new))
-#define uatomic_xchg(addr, v) \
+
+#define uatomic_xchg_mo(addr, v, mo) \
UATOMIC_COMPAT(xchg(addr, v))
-#define uatomic_and(addr, v) \
+#define uatomic_and_mo(addr, v, mo) \
UATOMIC_COMPAT(and(addr, v))
#define cmm_smp_mb__before_uatomic_and() cmm_barrier()
#define cmm_smp_mb__after_uatomic_and() cmm_barrier()
-#define uatomic_or(addr, v) \
+#define uatomic_or_mo(addr, v, mo) \
UATOMIC_COMPAT(or(addr, v))
#define cmm_smp_mb__before_uatomic_or() cmm_barrier()
#define cmm_smp_mb__after_uatomic_or() cmm_barrier()
-#define uatomic_add_return(addr, v) \
+#define uatomic_add_return_mo(addr, v, mo) \
UATOMIC_COMPAT(add_return(addr, v))
-#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
+#define uatomic_add_mo(addr, v, mo) UATOMIC_COMPAT(add(addr, v))
#define cmm_smp_mb__before_uatomic_add() cmm_barrier()
#define cmm_smp_mb__after_uatomic_add() cmm_barrier()
-#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
+#define uatomic_inc_mo(addr, mo) UATOMIC_COMPAT(inc(addr))
#define cmm_smp_mb__before_uatomic_inc() cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc() cmm_barrier()
-#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
+#define uatomic_dec_mo(addr, mo) UATOMIC_COMPAT(dec(addr))
#define cmm_smp_mb__before_uatomic_dec() cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec() cmm_barrier()
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_load_mo(enum cmm_memorder mo)
{
/*
* A SMP barrier is not necessary for CMM_SEQ_CST because, only a
}
}
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_load_mo(enum cmm_memorder mo)
{
/*
* A SMP barrier is not necessary for CMM_SEQ_CST because following
}
}
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_xchg has implicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_xchg has implicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_cmpxchg has implicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_cmpxchg has implicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_and has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_and has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_or has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_or has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_add has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_add has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_sub has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_sub has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_inc has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_inc has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_dec has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_dec has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_add_return has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_add_return has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_sub_return has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
-{
- /* NOP. uatomic_sub_return has explicit lock prefix. */
- switch (mo) {
- case CMM_RELAXED: /* Fall-through */
- case CMM_ACQUIRE: /* Fall-through */
- case CMM_CONSUME: /* Fall-through */
- case CMM_RELEASE: /* Fall-through */
- case CMM_ACQ_REL: /* Fall-through */
- case CMM_SEQ_CST: /* Fall-through */
- case CMM_SEQ_CST_FENCE:
- break;
- default:
- abort();
- }
-}
-
-#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
- do { \
- _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \
+ do { \
+ _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
} while (0)
#define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \
- do { \
- _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
+ do { \
+ _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
} while (0)