static
int rcu_bp_refcount;
+/*
+ * The sys_membarrier system call is only available on Linux.
+ */
+#ifdef __linux__
+#include <urcu/syscall-compat.h>
+#endif
+
+/* If the headers do not support SYS_membarrier, fall back on RCU_MB (plain memory barriers) */
+#ifdef SYS_membarrier
+# define membarrier(...) syscall(SYS_membarrier, __VA_ARGS__)
+#else
+# define membarrier(...) -ENOSYS
+#endif
+
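+/*
+ * These commands mirror the kernel ABI in <linux/membarrier.h>:
+ * MEMBARRIER_CMD_QUERY returns a bitmask of the commands supported by
+ * the running kernel, and MEMBARRIER_CMD_SHARED executes a memory
+ * barrier on all running threads.
+ */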
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+};
+
static
void __attribute__((constructor)) rcu_bp_init(void);
static
void __attribute__((destructor)) rcu_bp_exit(void);
+int urcu_bp_has_sys_membarrier;
+
/*
* rcu_gp_lock ensures mutual exclusion between threads calling
* synchronize_rcu().
urcu_die(ret);
}
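+/*
+ * Updater-side barrier. When sys_membarrier is available, force a full
+ * memory barrier on every running thread, so the paired reader-side
+ * barrier (urcu_bp_smp_mb_slave) can be a mere compiler barrier.
+ * Otherwise, emit a local full barrier and rely on readers doing the
+ * same.
+ */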
+static void smp_mb_master(void)
+{
+ if (caa_likely(urcu_bp_has_sys_membarrier))
+ (void) membarrier(MEMBARRIER_CMD_SHARED, 0);
+ else
+ cmm_smp_mb();
+}
+
/*
* Always called with rcu_registry lock held. Releases this lock between
* iterations and grabs it again. Holds the lock when it returns.
/* All threads should read qparity before accessing the data
 * structure pointed to by the new ptr. */
/* Write new ptr before changing the qparity */
- cmm_smp_mb();
+ smp_mb_master();
/*
* Wait for readers to observe original parity or be quiescent.
* Finish waiting for reader threads before letting the old ptr being
* freed.
*/
- cmm_smp_mb();
+ smp_mb_master();
out:
mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
urcu_bp_thread_exit_notifier);
if (ret)
abort();
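+ /*
+ * MEMBARRIER_CMD_QUERY returns a bitmask of supported commands,
+ * or a negative value when the system call is unavailable.
+ */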
+ ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
+ if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED)) {
+ urcu_bp_has_sys_membarrier = 1;
+ }
initialized = 1;
}
mutex_unlock(&init_lock);
*/
extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
+extern int urcu_bp_has_sys_membarrier;
+
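+/*
+ * Reader-side barrier paired with smp_mb_master(): when sys_membarrier
+ * is available, the updater forces a full barrier on each running
+ * thread, so only a compiler barrier is needed on the read-side fast
+ * path.
+ */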
+static inline void urcu_bp_smp_mb_slave(void)
+{
+ if (caa_likely(urcu_bp_has_sys_membarrier))
+ cmm_barrier();
+ else
+ cmm_smp_mb();
+}
+
static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
{
unsigned long v;
{
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
- cmm_smp_mb();
+ urcu_bp_smp_mb_slave();
} else
_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
}
tmp = URCU_TLS(rcu_reader)->ctr;
urcu_assert(tmp & RCU_GP_CTR_NEST_MASK);
/* Finish using rcu before decrementing the pointer. */
- cmm_smp_mb();
+ urcu_bp_smp_mb_slave();
_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp - RCU_GP_COUNT);
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}