}
#ifdef RCU_MEMBARRIER
-static void smp_mb_master(int group)
+static void smp_mb_master(void)
{
if (caa_likely(rcu_has_sys_membarrier))
(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
else
cmm_smp_mb();
}
#endif
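/*
* Illustrative aside: membarrier() as called above is assumed to be a thin
* wrapper around the Linux membarrier(2) system call, e.g.:
*
*     static int membarrier(int cmd, int flags)
*     {
*             return syscall(__NR_membarrier, cmd, flags);
*     }
*
* MEMBARRIER_CMD_SHARED issues a memory barrier on every running thread,
* which is what lets smp_mb_slave() stay a simple compiler barrier on the
* read side.
*/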
#ifdef RCU_MB
-static void smp_mb_master(int group)
+static void smp_mb_master(void)
{
cmm_smp_mb();
}
#endif
cmm_smp_mb(); /* read ->need_mb before ending the barrier */
}
-static void smp_mb_master(int group)
+static void smp_mb_master(void)
{
force_mb_all_readers();
}
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
if (uatomic_read(&rcu_gp.futex) != -1)
return;
while (futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&rcu_gp.futex);
/* Write futex before read reader_gp */
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
}
cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
if (cds_list_empty(input_readers)) {
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
uatomic_set(&rcu_gp.futex, 0);
}
break;
if (cds_list_empty(input_readers)) {
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
uatomic_set(&rcu_gp.futex, 0);
}
break;
} else {
if (wait_gp_loops == KICK_READER_LOOPS) {
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
wait_gp_loops = 0;
}
/* Temporarily unlock the registry lock. */
/*
* All threads should read qparity before accessing data structure
* where new ptr points to. Must be done within rcu_registry_lock
* because it iterates on reader threads.
*/
/* Write new ptr before changing the qparity */
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
/*
* Wait for readers to observe original parity or be quiescent.
*/
/*
* Finish waiting for reader threads before letting the old ptr
* being freed. Must be done within rcu_registry_lock because it
* iterates on reader threads.
*/
- smp_mb_master(RCU_MB_GROUP);
+ smp_mb_master();
out:
mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
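/*
* Illustrative usage sketch (hypothetical "struct foo *gp" protected by
* RCU): the smp_mb_master() calls above are what make the usual
* publish-then-reclaim pattern safe for callers of synchronize_rcu().
*
*     struct foo *new, *old;
*
*     new = malloc(sizeof(*new));
*     new->value = 42;
*     old = rcu_xchg_pointer(&gp, new);   (publish the new version)
*     synchronize_rcu();                  (wait for pre-existing readers)
*     free(old);                          (no reader can still hold old)
*/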
};
/*
- * RCU memory barrier broadcast group. Currently, only broadcast to all process
- * threads is supported (group 0).
- *
* Slave barriers are only guaranteed to be ordered wrt master barriers.
*
* The pair ordering is detailed as (O: ordered, X: not ordered) :
*               slave  master
*        slave    X      O
*        master   O      O
*/
-#define MB_GROUP_ALL 0
-#define RCU_MB_GROUP MB_GROUP_ALL
-
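/*
* Illustrative pairing under RCU_MEMBARRIER, matching the matrix above
* (reader steps taken from _rcu_read_lock_update() below, updater steps
* from synchronize_rcu() in urcu.c):
*
*     reader                                updater
*     store URCU_TLS(rcu_reader).ctr        store new pointer
*     smp_mb_slave()                        smp_mb_master()
*       (cmm_barrier() when                   (membarrier(2) when
*        sys_membarrier is available)          sys_membarrier is available)
*     read shared pointer                   flip parity, wait for readers
*
* The updater-side membarrier() call forces a full barrier on every running
* reader thread, which is why slave/master pairs are ordered (O) while
* slave/slave pairs are not (X).
*/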
#ifdef RCU_MEMBARRIER
extern int rcu_has_sys_membarrier;
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
{
if (caa_likely(rcu_has_sys_membarrier))
cmm_barrier();
else
cmm_smp_mb();
}
#endif
#ifdef RCU_MB
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
{
cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
-static inline void smp_mb_slave(int group)
+static inline void smp_mb_slave(void)
{
cmm_barrier();
}
#endif
static inline void _rcu_read_lock_update(unsigned long tmp)
{
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
- smp_mb_slave(RCU_MB_GROUP);
+ smp_mb_slave();
} else
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
}
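/*
* Illustrative read-side usage (hypothetical "struct foo *gp"): the helper
* above is called from _rcu_read_lock(), and a reader typically pairs it
* with rcu_dereference():
*
*     struct foo *p;
*
*     rcu_read_lock();
*     p = rcu_dereference(gp);
*     if (p)
*             do_something_with(p->value);   (hypothetical consumer)
*     rcu_read_unlock();
*/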
static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
{
if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
- smp_mb_slave(RCU_MB_GROUP);
+ smp_mb_slave();
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);
- smp_mb_slave(RCU_MB_GROUP);
+ smp_mb_slave();
wake_up_gp();
} else
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp - RCU_GP_COUNT);