From: Olivier Dion
Date: Mon, 21 Oct 2024 17:18:08 +0000 (-0400)
Subject: Separate uatomic and uatomic_mo
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=438b2d8886b85dfd9ba2dd496479fdcc4f5ecac5;p=urcu.git

Separate uatomic and uatomic_mo

The API for uatomic is now defined under `urcu/uatomic/api.h', which is
included by `urcu/uatomic.h'. All definitions are macros that dispatch
to their `_mo' counterpart. A default memory order argument is provided
for backward compatibility.

This means that only the `uatomic_*_mo' variants need to be implemented,
either generically or per architecture.

This also removes the C11 compatibility layer on x86. Indeed, since RMW
operations are always guaranteed to have a full fence, it is safe to
ignore the memory ordering. This is because all of the __sync operations
used are documented as being "considered as a full barrier". See
https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html.

Change-Id: I6be8c45b1758f268e7406bb17ab0086f9e9f5d4e
Signed-off-by: Olivier Dion
Signed-off-by: Mathieu Desnoyers
---

diff --git a/include/Makefile.am b/include/Makefile.am
index 4c32a4c..9bc93b4 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -64,6 +64,7 @@ nobase_include_HEADERS = \
 	urcu/tls-compat.h \
 	urcu/uatomic/aarch64.h \
 	urcu/uatomic/alpha.h \
+	urcu/uatomic/api.h \
 	urcu/uatomic_arch.h \
 	urcu/uatomic/arm.h \
 	urcu/uatomic/builtins.h \
diff --git a/include/urcu/uatomic.h b/include/urcu/uatomic.h
index 561c829..69154d3 100644
--- a/include/urcu/uatomic.h
+++ b/include/urcu/uatomic.h
@@ -63,6 +63,8 @@ static inline int cmm_to_c11(int mo)
 	return mo;
 }
 
+#include <urcu/uatomic/api.h>
+
 #if defined(CONFIG_RCU_USE_ATOMIC_BUILTINS)
 #include <urcu/uatomic/builtins.h>
 #elif defined(URCU_ARCH_X86)
diff --git a/include/urcu/uatomic/api.h b/include/urcu/uatomic/api.h
new file mode 100644
index 0000000..4b92464
--- /dev/null
+++ b/include/urcu/uatomic/api.h
@@ -0,0 +1,71 @@
+#ifndef _URCU_UATOMIC_API_H
+#define _URCU_UATOMIC_API_H
+
+/*
+ * Select second argument. Use inside macros to implement optional last macro
+ * argument, such as:
+ *
+ * #define macro(_a, _b, _c, _optional...) \
+ *	_uatomic_select_arg1(_, ##_optional, do_default_macro())
+ */
+#define _uatomic_select_arg1(arg0, arg1, ...) arg1
+
+/*
+ * Like _uatomic_select_arg1(), but can be used for selecting a second optional
+ * argument.
+ */
+#define _uatomic_select_arg2(arg0, arg1, arg2, ...) arg2
+
+#define _uatomic_default_mo(dflt, mo...) \
+	_uatomic_select_arg1(_, ##mo, dflt)
+
+#define _uatomic_default_mo2(dflt, mo...) \
+	_uatomic_select_arg2(_, ##mo, dflt, dflt)
+
+#define uatomic_load(addr, mo...) \
+	uatomic_load_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_read(addr, mo...) \
+	uatomic_load_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_store(addr, value, mo...) \
+	uatomic_store_mo(addr, value, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_set(addr, value, mo...) \
+	uatomic_store_mo(addr, value, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_add_return(addr, v, mo...) \
+	uatomic_add_return_mo(addr, v, _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_sub_return(addr, v, mo...) \
+	uatomic_sub_return_mo(addr, v, _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_and(addr, mask, mo...) \
+	uatomic_and_mo(addr, mask, _uatomic_default_mo(CMM_SEQ_CST, ##mo))
+
+#define uatomic_or(addr, mask, mo...) \
+	uatomic_or_mo(addr, mask, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_add(addr, v, mo...)
\ + uatomic_add_mo(addr, v, _uatomic_default_mo(CMM_RELAXED, ##mo)) + +#define uatomic_sub(addr, v, mo...) \ + uatomic_sub_mo(addr, v, _uatomic_default_mo(CMM_RELAXED, ##mo)) + +#define uatomic_inc(addr, mo...) \ + uatomic_inc_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo)) + +#define uatomic_dec(addr, mo...) \ + uatomic_dec_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo)) + +#define uatomic_xchg(addr, value, mo...) \ + uatomic_xchg_mo(addr, value, \ + _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo)) + +#define uatomic_cmpxchg(addr, value, _new, mo...) \ + uatomic_cmpxchg_mo(addr, value, _new, \ + _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo), \ + _uatomic_default_mo2(CMM_RELAXED, ##mo)) + + +#endif /* _URUC_UATOMIC_API_H */ diff --git a/include/urcu/uatomic/arm.h b/include/urcu/uatomic/arm.h index 5124a71..6923371 100644 --- a/include/urcu/uatomic/arm.h +++ b/include/urcu/uatomic/arm.h @@ -25,6 +25,23 @@ extern "C" { #endif /* xchg */ +static inline void _cmm_compat_c11_smp_mb__before_xchg_mo(enum cmm_memorder mo) +{ + switch (mo) { + case CMM_SEQ_CST_FENCE: + case CMM_SEQ_CST: + case CMM_ACQ_REL: + case CMM_RELEASE: + cmm_smp_mb(); + break; + case CMM_ACQUIRE: + case CMM_CONSUME: + case CMM_RELAXED: + break; + default: + abort(); + } +} /* * Based on [1], __sync_lock_test_and_set() is not a full barrier, but @@ -34,10 +51,10 @@ extern "C" { * * [1] https://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html */ -#define uatomic_xchg(addr, v) \ - ({ \ - cmm_smp_mb(); \ - __sync_lock_test_and_set(addr, v); \ +#define uatomic_xchg_mo(addr, v, mo) \ + ({ \ + _cmm_compat_c11_smp_mb__before_xchg_mo(mo); \ + __sync_lock_test_and_set(addr, v); \ }) #ifdef __cplusplus diff --git a/include/urcu/uatomic/builtins-generic.h b/include/urcu/uatomic/builtins-generic.h index 641ac53..ca11cf1 100644 --- a/include/urcu/uatomic/builtins-generic.h +++ b/include/urcu/uatomic/builtins-generic.h @@ -12,19 +12,14 @@ #include #include -#define uatomic_store(addr, v, mo) \ +#define uatomic_store_mo(addr, v, mo) \ do { \ __atomic_store_n(cmm_cast_volatile(addr), v, \ cmm_to_c11(mo)); \ cmm_seq_cst_fence_after_atomic(mo); \ } while (0) -#define uatomic_set(addr, v) \ - do { \ - uatomic_store(addr, v, CMM_RELAXED); \ - } while (0) - -#define uatomic_load(addr, mo) \ +#define uatomic_load_mo(addr, mo) \ __extension__ \ ({ \ __typeof__(*(addr)) _value = \ @@ -35,9 +30,6 @@ _value; \ }) -#define uatomic_read(addr) \ - uatomic_load(addr, CMM_RELAXED) - #define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \ __extension__ \ ({ \ @@ -54,9 +46,6 @@ _old; \ }) -#define uatomic_cmpxchg(addr, old, new) \ - uatomic_cmpxchg_mo(addr, old, new, CMM_SEQ_CST_FENCE, CMM_RELAXED) - #define uatomic_xchg_mo(addr, v, mo) \ __extension__ \ ({ \ @@ -67,9 +56,6 @@ _old; \ }) -#define uatomic_xchg(addr, v) \ - uatomic_xchg_mo(addr, v, CMM_SEQ_CST_FENCE) - #define uatomic_add_return_mo(addr, v, mo) \ __extension__ \ ({ \ @@ -80,8 +66,6 @@ _old; \ }) -#define uatomic_add_return(addr, v) \ - uatomic_add_return_mo(addr, v, CMM_SEQ_CST_FENCE) #define uatomic_sub_return_mo(addr, v, mo) \ __extension__ \ @@ -93,8 +77,6 @@ _old; \ }) -#define uatomic_sub_return(addr, v) \ - uatomic_sub_return_mo(addr, v, CMM_SEQ_CST_FENCE) #define uatomic_and_mo(addr, mask, mo) \ do { \ @@ -103,8 +85,6 @@ cmm_seq_cst_fence_after_atomic(mo); \ } while (0) -#define uatomic_and(addr, mask) \ - uatomic_and_mo(addr, mask, CMM_SEQ_CST) #define uatomic_or_mo(addr, mask, mo) \ do { \ @@ -114,33 +94,18 @@ } while (0) -#define uatomic_or(addr, mask) \ - uatomic_or_mo(addr, 
mask, CMM_RELAXED) - #define uatomic_add_mo(addr, v, mo) \ (void) uatomic_add_return_mo(addr, v, mo) -#define uatomic_add(addr, v) \ - uatomic_add_mo(addr, v, CMM_RELAXED) - #define uatomic_sub_mo(addr, v, mo) \ (void) uatomic_sub_return_mo(addr, v, mo) -#define uatomic_sub(addr, v) \ - uatomic_sub_mo(addr, v, CMM_RELAXED) - #define uatomic_inc_mo(addr, mo) \ uatomic_add_mo(addr, 1, mo) -#define uatomic_inc(addr) \ - uatomic_inc_mo(addr, CMM_RELAXED) - #define uatomic_dec_mo(addr, mo) \ uatomic_sub_mo(addr, 1, mo) -#define uatomic_dec(addr) \ - uatomic_dec_mo(addr, CMM_RELAXED) - #define cmm_smp_mb__before_uatomic_and() cmm_smp_mb() #define cmm_smp_mb__after_uatomic_and() cmm_smp_mb() diff --git a/include/urcu/uatomic/generic.h b/include/urcu/uatomic/generic.h index ed655bb..24a025f 100644 --- a/include/urcu/uatomic/generic.h +++ b/include/urcu/uatomic/generic.h @@ -22,18 +22,33 @@ extern "C" { #endif -#ifndef uatomic_set -#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v))) -#endif - /* * Can be defined for the architecture. * * What needs to be emitted _before_ the `operation' with memory ordering `mo'. */ #ifndef _cmm_compat_c11_smp_mb__before_mo -# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb() -#endif +# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \ + do { \ + switch (mo) { \ + case CMM_SEQ_CST_FENCE: \ + case CMM_SEQ_CST: \ + case CMM_ACQ_REL: \ + case CMM_RELEASE: \ + cmm_smp_mb(); \ + break; \ + case CMM_ACQUIRE: \ + case CMM_CONSUME: \ + case CMM_RELAXED: \ + break; \ + default: \ + abort(); \ + break; \ + \ + } \ + } while(0) + +#endif /* _cmm_compat_c11_smp_mb__before_mo */ /* * Can be defined for the architecture. @@ -41,83 +56,54 @@ extern "C" { * What needs to be emitted _after_ the `operation' with memory ordering `mo'. 
*/ #ifndef _cmm_compat_c11_smp_mb__after_mo -# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb() -#endif - -#define uatomic_load_store_return_op(op, addr, v, mo) \ - __extension__ \ - ({ \ - _cmm_compat_c11_smp_mb__before_mo(op, mo); \ - __typeof__((*addr)) _value = op(addr, v); \ - _cmm_compat_c11_smp_mb__after_mo(op, mo); \ +# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \ + do { \ + switch (mo) { \ + case CMM_SEQ_CST_FENCE: \ + case CMM_SEQ_CST: \ + case CMM_ACQUIRE: \ + case CMM_CONSUME: \ + case CMM_ACQ_REL: \ + cmm_smp_mb(); \ + break; \ + case CMM_RELEASE: \ + case CMM_RELAXED: \ + break; \ + default: \ + abort(); \ + break; \ \ - _value; \ - }) + } \ + } while(0) +#endif /* _cmm_compat_c11_smp_mb__after_mo */ -#define uatomic_load_store_op(op, addr, v, mo) \ do { \ _cmm_compat_c11_smp_mb__before_mo(op, mo); \ op(addr, v); \ _cmm_compat_c11_smp_mb__after_mo(op, mo); \ } while (0) -#define uatomic_store(addr, v, mo) \ +#define uatomic_store_mo(addr, v, mo) \ do { \ _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \ uatomic_set(addr, v); \ _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \ } while (0) -#define uatomic_and_mo(addr, v, mo) \ - uatomic_load_store_op(uatomic_and, addr, v, mo) - -#define uatomic_or_mo(addr, v, mo) \ - uatomic_load_store_op(uatomic_or, addr, v, mo) - -#define uatomic_add_mo(addr, v, mo) \ - uatomic_load_store_op(uatomic_add, addr, v, mo) - -#define uatomic_sub_mo(addr, v, mo) \ - uatomic_load_store_op(uatomic_sub, addr, v, mo) - -#define uatomic_inc_mo(addr, mo) \ - uatomic_load_store_op(uatomic_add, addr, 1, mo) - -#define uatomic_dec_mo(addr, mo) \ - uatomic_load_store_op(uatomic_add, addr, -1, mo) /* - * NOTE: We can not just do switch (_value == (old) ? mos : mof) otherwise the - * compiler emit a -Wduplicated-cond warning. */ -#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \ __extension__ \ ({ \ - _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \ - __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \ - new); \ + __typeof__(*(addr)) _value = \ + __atomic_load_n(cmm_cast_volatile(addr), \ + cmm_to_c11(mo)); \ + cmm_seq_cst_fence_after_atomic(mo); \ \ - if (_value == (old)) { \ - _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \ - } else { \ - _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \ - } \ _value; \ }) -#define uatomic_xchg_mo(addr, v, mo) \ - uatomic_load_store_return_op(uatomic_xchg, addr, v, mo) - -#define uatomic_add_return_mo(addr, v, mo) \ - uatomic_load_store_return_op(uatomic_add_return, addr, v) -#define uatomic_sub_return_mo(addr, v, mo) \ - uatomic_load_store_return_op(uatomic_sub_return, addr, v) - -#ifndef uatomic_read -#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr)) -#endif - -#define uatomic_load(addr, mo) \ +#define uatomic_load_mo(addr, mo) \ __extension__ \ ({ \ _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \ @@ -146,13 +132,20 @@ void _uatomic_link_error(void) } #endif + +/* + * NOTE: All RMW operations are implemented using the `__sync' builtins. All + * builtins used are documented to be considered a "full barrier". Therefore, + * for RMW operations, nothing is emitted for any memory order. 
+ */ + #else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */ extern void _uatomic_link_error(void); #endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */ -/* cmpxchg */ +/* uatomic_cmpxchg_mo */ -#ifndef uatomic_cmpxchg +#ifndef uatomic_cmpxchg_mo static inline __attribute__((always_inline)) unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, unsigned long _new, int len) @@ -181,17 +174,14 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, return 0; } - -#define uatomic_cmpxchg(addr, old, _new) \ - ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ +#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \ + ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ caa_cast_long_keep_sign(old), \ - caa_cast_long_keep_sign(_new),\ + caa_cast_long_keep_sign(_new), \ sizeof(*(addr)))) +/* uatomic_and_mo */ - -/* uatomic_and */ - -#ifndef uatomic_and +#ifndef uatomic_and_mo static inline __attribute__((always_inline)) void _uatomic_and(void *addr, unsigned long val, int len) @@ -219,7 +209,7 @@ void _uatomic_and(void *addr, unsigned long val, _uatomic_link_error(); } -#define uatomic_and(addr, v) \ +#define uatomic_and_mo(addr, v, mo) \ (_uatomic_and((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) @@ -228,9 +218,9 @@ void _uatomic_and(void *addr, unsigned long val, #endif -/* uatomic_or */ +/* uatomic_or_mo */ -#ifndef uatomic_or +#ifndef uatomic_or_mo static inline __attribute__((always_inline)) void _uatomic_or(void *addr, unsigned long val, int len) @@ -259,7 +249,7 @@ void _uatomic_or(void *addr, unsigned long val, return; } -#define uatomic_or(addr, v) \ +#define uatomic_or_mo(addr, v, mo) \ (_uatomic_or((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) @@ -269,9 +259,9 @@ void _uatomic_or(void *addr, unsigned long val, #endif -/* uatomic_add_return */ +/* uatomic_add_return_mo */ -#ifndef uatomic_add_return +#ifndef uatomic_add_return_mo static inline __attribute__((always_inline)) unsigned long _uatomic_add_return(void *addr, unsigned long val, int len) @@ -297,13 +287,13 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, } -#define uatomic_add_return(addr, v) \ +#define uatomic_add_return_mo(addr, v, mo) \ ((__typeof__(*(addr))) _uatomic_add_return((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) #endif /* #ifndef uatomic_add_return */ -#ifndef uatomic_xchg +#ifndef uatomic_xchg_mo /* xchg */ static inline __attribute__((always_inline)) @@ -365,16 +355,16 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) return 0; } -#define uatomic_xchg(addr, v) \ +#define uatomic_xchg_mo(addr, v, mo) \ ((__typeof__(*(addr))) _uatomic_exchange((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) -#endif /* #ifndef uatomic_xchg */ +#endif /* #ifndef uatomic_xchg_mo */ -#else /* #ifndef uatomic_cmpxchg */ +#else /* #ifndef uatomic_cmpxchg_mo */ -#ifndef uatomic_and -/* uatomic_and */ +#ifndef uatomic_and_mo +/* uatomic_and_mo */ static inline __attribute__((always_inline)) void _uatomic_and(void *addr, unsigned long val, int len) @@ -436,17 +426,17 @@ void _uatomic_and(void *addr, unsigned long val, int len) _uatomic_link_error(); } -#define uatomic_and(addr, v) \ +#define uatomic_and_mo(addr, v, mo) \ (_uatomic_and((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) #define cmm_smp_mb__before_uatomic_and() cmm_barrier() #define cmm_smp_mb__after_uatomic_and() cmm_barrier() -#endif /* #ifndef uatomic_and */ +#endif /* #ifndef uatomic_and_mo */ -#ifndef uatomic_or -/* 
uatomic_or */ +#ifndef uatomic_or_mo +/* uatomic_or_mo */ static inline __attribute__((always_inline)) void _uatomic_or(void *addr, unsigned long val, int len) @@ -510,17 +500,17 @@ void _uatomic_or(void *addr, unsigned long val, int len) _uatomic_link_error(); } -#define uatomic_or(addr, v) \ +#define uatomic_or_mo(addr, v, mo) \ (_uatomic_or((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) #define cmm_smp_mb__before_uatomic_or() cmm_barrier() #define cmm_smp_mb__after_uatomic_or() cmm_barrier() -#endif /* #ifndef uatomic_or */ +#endif /* #ifndef uatomic_or_mo */ -#ifndef uatomic_add_return -/* uatomic_add_return */ +#ifndef uatomic_add_return_mo +/* uatomic_add_return_mo */ static inline __attribute__((always_inline)) unsigned long _uatomic_add_return(void *addr, unsigned long val, int len) @@ -589,14 +579,14 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, int len) return 0; } -#define uatomic_add_return(addr, v) \ - ((__typeof__(*(addr))) _uatomic_add_return((addr), \ +#define uatomic_add_return_mo(addr, v, mo) \ + ((__typeof__(*(addr))) _uatomic_add_return((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) -#endif /* #ifndef uatomic_add_return */ +#endif /* #ifndef uatomic_add_return_mo */ -#ifndef uatomic_xchg -/* xchg */ +#ifndef uatomic_xchg_mo +/* uatomic_xchg_mo */ static inline __attribute__((always_inline)) unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) @@ -665,37 +655,37 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) return 0; } -#define uatomic_xchg(addr, v) \ - ((__typeof__(*(addr))) _uatomic_exchange((addr), \ +#define uatomic_xchg_mo(addr, v, mo) \ + ((__typeof__(*(addr))) _uatomic_exchange((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) -#endif /* #ifndef uatomic_xchg */ +#endif /* #ifndef uatomic_xchg_mo */ -#endif /* #else #ifndef uatomic_cmpxchg */ +#endif /* #else #ifndef uatomic_cmpxchg_mo */ -/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */ +/* uatomic_sub_return_mo, uatomic_add_mo, uatomic_sub_mo, uatomic_inc_mo, uatomic_dec_mo */ -#ifndef uatomic_add -#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v)) +#ifndef uatomic_add_mo +#define uatomic_add_mo(addr, v, mo) (void)uatomic_add_return_mo((addr), (v), mo) #define cmm_smp_mb__before_uatomic_add() cmm_barrier() #define cmm_smp_mb__after_uatomic_add() cmm_barrier() #endif -#define uatomic_sub_return(addr, v) \ - uatomic_add_return((addr), -(caa_cast_long_keep_sign(v))) -#define uatomic_sub(addr, v) \ - uatomic_add((addr), -(caa_cast_long_keep_sign(v))) +#define uatomic_sub_return_mo(addr, v, mo) \ + uatomic_add_return_mo((addr), -(caa_cast_long_keep_sign(v)), mo) +#define uatomic_sub_mo(addr, v, mo) \ + uatomic_add_mo((addr), -(caa_cast_long_keep_sign(v)), mo) #define cmm_smp_mb__before_uatomic_sub() cmm_smp_mb__before_uatomic_add() #define cmm_smp_mb__after_uatomic_sub() cmm_smp_mb__after_uatomic_add() -#ifndef uatomic_inc -#define uatomic_inc(addr) uatomic_add((addr), 1) +#ifndef uatomic_inc_mo +#define uatomic_inc_mo(addr, mo) uatomic_add_mo((addr), 1, mo) #define cmm_smp_mb__before_uatomic_inc() cmm_smp_mb__before_uatomic_add() #define cmm_smp_mb__after_uatomic_inc() cmm_smp_mb__after_uatomic_add() #endif -#ifndef uatomic_dec -#define uatomic_dec(addr) uatomic_add((addr), -1) +#ifndef uatomic_dec_mo +#define uatomic_dec_mo(addr, mo) uatomic_add((addr), -1, mo) #define cmm_smp_mb__before_uatomic_dec() cmm_smp_mb__before_uatomic_add() #define cmm_smp_mb__after_uatomic_dec() 
cmm_smp_mb__after_uatomic_add() #endif diff --git a/include/urcu/uatomic/ppc.h b/include/urcu/uatomic/ppc.h index dc59518..1f9a292 100644 --- a/include/urcu/uatomic/ppc.h +++ b/include/urcu/uatomic/ppc.h @@ -94,7 +94,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len) return 0; } -#define uatomic_xchg(addr, v) \ +#define uatomic_xchg_mo(addr, v, mo) \ ((__typeof__(*(addr))) _uatomic_exchange((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) @@ -157,7 +157,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, } -#define uatomic_cmpxchg(addr, old, _new) \ +#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ caa_cast_long_keep_sign(old), \ caa_cast_long_keep_sign(_new),\ @@ -216,7 +216,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, } -#define uatomic_add_return(addr, v) \ +#define uatomic_add_return_mo(addr, v, mo) \ ((__typeof__(*(addr))) _uatomic_add_return((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr)))) diff --git a/include/urcu/uatomic/s390.h b/include/urcu/uatomic/s390.h index 2562696..40ad569 100644 --- a/include/urcu/uatomic/s390.h +++ b/include/urcu/uatomic/s390.h @@ -99,7 +99,7 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len) return 0; } -#define uatomic_xchg(addr, v) \ +#define uatomic_xchg_mo(addr, v, mo) \ (__typeof__(*(addr))) _uatomic_exchange((addr), \ caa_cast_long_keep_sign(v), \ sizeof(*(addr))) @@ -140,7 +140,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, return 0; } -#define uatomic_cmpxchg(addr, old, _new) \ +#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \ (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ caa_cast_long_keep_sign(old), \ caa_cast_long_keep_sign(_new),\ diff --git a/include/urcu/uatomic/sparc64.h b/include/urcu/uatomic/sparc64.h index 97ecb46..c6baeac 100644 --- a/include/urcu/uatomic/sparc64.h +++ b/include/urcu/uatomic/sparc64.h @@ -59,7 +59,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old, } -#define uatomic_cmpxchg(addr, old, _new) \ +#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \ caa_cast_long_keep_sign(old), \ caa_cast_long_keep_sign(_new), \ diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h index 616eee9..5afb447 100644 --- a/include/urcu/uatomic/x86.h +++ b/include/urcu/uatomic/x86.h @@ -47,8 +47,6 @@ typedef struct { char v[8]; } __hp_8; #define __hp(size, x) ((__hp_##size *)(x)) -#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v))) - /* cmpxchg */ static inline __attribute__((always_inline)) @@ -598,41 +596,45 @@ extern unsigned long _compat_uatomic_add_return(void *addr, #define UATOMIC_COMPAT(insn) (_uatomic_##insn) #endif -/* Read is atomic even in compat mode */ -#define uatomic_set(addr, v) \ - UATOMIC_COMPAT(set(addr, v)) +/* + * All RMW operations have an implicit lock prefix. Thus, ignoring memory + * ordering for these operations, since they can all be respected by not + * emitting any memory barrier. 
+ */ -#define uatomic_cmpxchg(addr, old, _new) \ +#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof) \ UATOMIC_COMPAT(cmpxchg(addr, old, _new)) -#define uatomic_xchg(addr, v) \ + +#define uatomic_xchg_mo(addr, v, mo) \ UATOMIC_COMPAT(xchg(addr, v)) -#define uatomic_and(addr, v) \ +#define uatomic_and_mo(addr, v, mo) \ UATOMIC_COMPAT(and(addr, v)) #define cmm_smp_mb__before_uatomic_and() cmm_barrier() #define cmm_smp_mb__after_uatomic_and() cmm_barrier() -#define uatomic_or(addr, v) \ +#define uatomic_or_mo(addr, v, mo) \ UATOMIC_COMPAT(or(addr, v)) #define cmm_smp_mb__before_uatomic_or() cmm_barrier() #define cmm_smp_mb__after_uatomic_or() cmm_barrier() -#define uatomic_add_return(addr, v) \ +#define uatomic_add_return_mo(addr, v, mo) \ UATOMIC_COMPAT(add_return(addr, v)) -#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v)) +#define uatomic_add_mo(addr, v, mo) UATOMIC_COMPAT(add(addr, v)) #define cmm_smp_mb__before_uatomic_add() cmm_barrier() #define cmm_smp_mb__after_uatomic_add() cmm_barrier() -#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr)) +#define uatomic_inc_mo(addr, mo) UATOMIC_COMPAT(inc(addr)) #define cmm_smp_mb__before_uatomic_inc() cmm_barrier() #define cmm_smp_mb__after_uatomic_inc() cmm_barrier() -#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr)) +#define uatomic_dec_mo(addr, mo) UATOMIC_COMPAT(dec(addr)) #define cmm_smp_mb__before_uatomic_dec() cmm_barrier() #define cmm_smp_mb__after_uatomic_dec() cmm_barrier() -static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo) + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_load_mo(enum cmm_memorder mo) { /* * A SMP barrier is not necessary for CMM_SEQ_CST because, only a @@ -660,7 +662,7 @@ static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memor } } -static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo) +static inline void _cmm_compat_c11_smp_mb__after_uatomic_load_mo(enum cmm_memorder mo) { /* * A SMP barrier is not necessary for CMM_SEQ_CST because following @@ -749,354 +751,14 @@ static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorde } } -static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_xchg has implicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_xchg has implicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_cmpxchg has implicit lock prefix. 
*/ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_cmpxchg has implicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_and has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_and has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_or has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_or has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_add has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_add has explicit lock prefix. 
*/ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_sub has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_sub has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_inc has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_inc has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_dec has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_dec has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_add_return has explicit lock prefix. 
*/ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_add_return has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_sub_return has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo) -{ - /* NOP. uatomic_sub_return has explicit lock prefix. */ - switch (mo) { - case CMM_RELAXED: /* Fall-through */ - case CMM_ACQUIRE: /* Fall-through */ - case CMM_CONSUME: /* Fall-through */ - case CMM_RELEASE: /* Fall-through */ - case CMM_ACQ_REL: /* Fall-through */ - case CMM_SEQ_CST: /* Fall-through */ - case CMM_SEQ_CST_FENCE: - break; - default: - abort(); - } -} - -#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \ - do { \ - _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \ +#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \ } while (0) #define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \ - do { \ - _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \ + do { \ + _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \ } while (0)
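
For illustration, here is a minimal caller of the new dispatch macros from
`urcu/uatomic/api.h' (not part of the patch; the `counter' variable and the
explicit memory orders below are only example choices). Existing call sites
keep compiling unchanged, and an explicit memory order can now be passed as an
optional trailing argument:

	/* Illustrative sketch only: `counter' and the memory orders are example choices. */
	#include <urcu/uatomic.h>

	static unsigned long counter;

	static void example(void)
	{
		uatomic_set(&counter, 0);			/* dispatches to uatomic_store_mo(..., CMM_RELAXED) */
		uatomic_inc(&counter);				/* dispatches to uatomic_inc_mo(..., CMM_RELAXED) */
		uatomic_add(&counter, 2, CMM_RELEASE);		/* explicit memory order via the optional argument */
		(void) uatomic_load(&counter, CMM_ACQUIRE);	/* explicit memory order for a load */
		(void) uatomic_cmpxchg(&counter, 3, 4);		/* defaults: CMM_SEQ_CST_FENCE on success, CMM_RELAXED on failure */
	}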