From: Mathieu Desnoyers
Date: Tue, 26 May 2009 17:31:36 +0000 (-0400)
Subject: sync_core to smp_mb transition
X-Git-Tag: v0.1~210
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=5dba80f9bed985c23090ec74bb4173b31b28513a;p=userspace-rcu.git

sync_core to smp_mb transition

- Move sync_core to smp_mb in urcu.c, update comments.
- cpuid clashes with -fPIC because it clobbers ebx. Use mb() if __PIC__
  is defined.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/arch_atomic_x86.h b/arch_atomic_x86.h
index 8423ae3..f471a39 100644
--- a/arch_atomic_x86.h
+++ b/arch_atomic_x86.h
@@ -30,7 +30,7 @@
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
 
-static __attribute__((always_inline))
+static inline __attribute__((always_inline))
 unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
 {
 	unsigned int result;
@@ -47,7 +47,7 @@ unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
 
 #if (BITS_PER_LONG == 64)
 
-static __attribute__((always_inline))
+static inline __attribute__((always_inline))
 unsigned long atomic_exchange_64(volatile unsigned long *addr,
 				 unsigned long val)
 {
@@ -65,7 +65,7 @@ unsigned long atomic_exchange_64(volatile unsigned long *addr,
 
 #endif
 
-static __attribute__((always_inline))
+static inline __attribute__((always_inline))
 unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
 {
 	switch (len) {
diff --git a/arch_x86.h b/arch_x86.h
index e7d945e..8a57325 100644
--- a/arch_x86.h
+++ b/arch_x86.h
@@ -97,10 +97,22 @@ static inline void cpu_relax(void)
 /*
  * Serialize core instruction execution. Also acts as a compiler barrier.
  */
+#ifdef __PIC__
+/*
+ * Cannot use cpuid because it clobbers the ebx register and clashes
+ * with -fPIC:
+ * error: PIC register 'ebx' clobbered in 'asm'
+ */
+static inline void sync_core(void)
+{
+	mb();
+}
+#else
 static inline void sync_core(void)
 {
 	asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
 }
+#endif
 
 #define rdtscll(val) \
 	do { \
diff --git a/urcu.c b/urcu.c
index 2a225c4..b71e162 100644
--- a/urcu.c
+++ b/urcu.c
@@ -254,12 +254,11 @@ void synchronize_rcu(void)
 	 */
 
 	/*
-	 * Current RCU formal verification model assumes sequential execution of
-	 * the write-side. Add core synchronization instructions. Can be removed
-	 * if the formal model is extended to prove that reordering is still
-	 * correct.
+	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
 	 */
-	sync_core();	/* Formal model assumes serialized execution */
+	smp_mb();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
@@ -274,7 +273,12 @@ void synchronize_rcu(void)
 	 * Ensured by STORE_SHARED and LOAD_SHARED.
 	 */
 
-	sync_core();	/* Formal model assumes serialized execution */
+	/*
+	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	smp_mb();
 
 	switch_next_urcu_qparity();	/* 1 -> 0 */
 
@@ -286,7 +290,12 @@ void synchronize_rcu(void)
 	 * Ensured by STORE_SHARED and LOAD_SHARED.
 	 */
 
-	sync_core();	/* Formal model assumes serialized execution */
+	/*
+	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	smp_mb();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
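
--
Background notes on the hunks above (sketches, not part of the patch).

arch_atomic_x86.h: gcc wants the inline keyword next to
__attribute__((always_inline)); with the attribute alone it may warn
that an always_inline function might not be inlinable. For reference,
the corrected declaration shape used throughout the file:

	/* inline keyword plus attribute: gcc can always inline this */
	static inline __attribute__((always_inline))
	unsigned long _atomic_exchange(volatile void *addr, unsigned long val,
				       int len);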
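
arch_x86.h: on 32-bit x86, -fPIC reserves ebx as the GOT pointer, so gcc
rejects inline asm that lists ebx as a clobber, producing the quoted
"PIC register 'ebx' clobbered in 'asm'" error; the patch therefore falls
back to mb() under __PIC__. A hypothetical PIC-safe alternative (name
and shape are assumptions, shown only to illustrate the constraint)
would preserve ebx around cpuid instead:

	static inline void sync_core_pic_safe(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/*
		 * Swap ebx into a scratch register before cpuid and swap
		 * it back afterwards, so the GOT pointer survives even
		 * though cpuid overwrites ebx.
		 */
		asm volatile("xchgl %%ebx, %1\n\t"
			     "cpuid\n\t"
			     "xchgl %%ebx, %1\n\t"
			     : "=a" (eax), "=&r" (ebx), "=c" (ecx), "=d" (edx)
			     : "0" (0)
			     : "memory");
	}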
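
urcu.c: cpuid fully serializes the instruction stream, which is stronger
than the grace period code needs; smp_mb() only orders memory accesses,
and that is enough on the write-side. A minimal sketch of the assumed
x86 barrier mapping (the real definitions live in arch_x86.h and may
differ):

	#define mb()		asm volatile("mfence" : : : "memory")
	#define smp_mb()	mb()	/* full memory barrier on SMP */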