From ca62305c8956ae7c953c5f99be356a813c60265a Mon Sep 17 00:00:00 2001
From: Olivier Dion
Date: Mon, 2 Dec 2024 10:22:01 -0500
Subject: [PATCH] arm: Use atomic builtins for xchg if supported

If the toolchain supports the C11 memory model, then implement
`uatomic_xchg_mo' with `__atomic_exchange_n' instead of
`__sync_lock_test_and_set'. This reduces the number of memory barriers
emitted, except for the default memory order (FULL_FENCE).

Change-Id: I2261f93134071e37e152a23bb78b21332844429b
Signed-off-by: Olivier Dion
Signed-off-by: Mathieu Desnoyers
---
 include/urcu/uatomic/arm.h | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/include/urcu/uatomic/arm.h b/include/urcu/uatomic/arm.h
index 6923371..d2ceb71 100644
--- a/include/urcu/uatomic/arm.h
+++ b/include/urcu/uatomic/arm.h
@@ -25,6 +25,25 @@ extern "C" {
 #endif
 
 /* xchg */
+
+/*
+ * If the toolchain supports the C11 memory model, then it is safe to implement
+ * `uatomic_xchg()' in terms of __atomic builtins. This has the effect of
+ * reducing the number of emitted memory barriers, except for the
+ * CMM_SEQ_CST_FENCE memory order.
+ */
+#ifdef _CMM_TOOLCHAIN_SUPPORT_C11_MM
+# define uatomic_xchg_mo(addr, v, mo)					\
+	__extension__							\
+	({								\
+		__typeof__((*addr)) _old =				\
+			__atomic_exchange_n(cmm_cast_volatile(addr), v,	\
+					cmm_to_c11(mo));		\
+		cmm_seq_cst_fence_after_atomic(mo);			\
+		_old;							\
+	})
+#else
+
 static inline void _cmm_compat_c11_smp_mb__before_xchg_mo(enum cmm_memorder mo)
 {
 	switch (mo) {
@@ -51,11 +70,12 @@ static inline void _cmm_compat_c11_smp_mb__before_xchg_mo(enum cmm_memorder mo)
  *
  * [1] https://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
  */
-#define uatomic_xchg_mo(addr, v, mo)				\
+# define uatomic_xchg_mo(addr, v, mo)				\
 	({							\
 		_cmm_compat_c11_smp_mb__before_xchg_mo(mo);	\
 		__sync_lock_test_and_set(addr, v);		\
 	})
+#endif /* _CMM_TOOLCHAIN_SUPPORT_C11_MM */
 
 #ifdef __cplusplus
 }
-- 
2.39.5
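
Note (not part of the patch): a minimal usage sketch of the macro this change
touches. It assumes liburcu's public <urcu/uatomic.h> header and the
`uatomic_xchg_mo()'/CMM_* memory-order names as exposed by recent liburcu.
With the C11 path above, the compiler emits only the barriers required for the
requested order, instead of the unconditional cmm_smp_mb() plus acquire-only
__sync_lock_test_and_set() sequence of the fallback, while CMM_SEQ_CST_FENCE
keeps the full fence after the operation.

#include <stdio.h>
#include <urcu/uatomic.h>

static unsigned long flag;

int main(void)
{
	/* Full-fence behaviour: a fence is kept after the exchange. */
	unsigned long old_fence = uatomic_xchg_mo(&flag, 1UL, CMM_SEQ_CST_FENCE);

	/*
	 * Weaker ordering: with _CMM_TOOLCHAIN_SUPPORT_C11_MM this should map
	 * to __atomic_exchange_n(..., __ATOMIC_ACQ_REL), letting the compiler
	 * choose the minimal barriers for acquire-release semantics.
	 */
	unsigned long old_acq_rel = uatomic_xchg_mo(&flag, 2UL, CMM_ACQ_REL);

	printf("previous values: %lu %lu\n", old_fence, old_acq_rel);
	return 0;
}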