}
#endif
+/*
+ * RCU memory barrier broadcast group. Currently, only broadcast to all process
+ * threads is supported (group 0).
+ *
+ * Slave barriers are only guaranteed to be ordered wrt master barriers.
+ *
+ * The pair ordering is detailed as (O: ordered, X: not ordered) :
+ *               slave     master
+ *   slave       X         O
+ *   master      O         O
+ */
+
+#define MB_GROUP_ALL 0
+#define RCU_MB_GROUP MB_GROUP_ALL
+
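As an aside (not part of the patch), the asymmetric pairing described above can be illustrated with a standalone sketch: readers bracket their counter updates with the cheap slave barrier, while the updater issues the expensive master barrier before scanning reader state. In the sys_membarrier case below the slave side can then degrade to a compiler barrier, because membarrier() supplies the ordering on the readers' behalf; in this self-contained sketch both stand-ins are full fences so it is correct on its own, and all names (reader_thread, updater_thread, reader_ctr, shared_val) are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RCU_MB_GROUP 0

/* Stand-ins for the patch's primitives: both are full fences here; the patch
 * weakens the slave side to barrier() when sys_membarrier() is available. */
static inline void smp_mb_slave(int group)  { (void)group; atomic_thread_fence(memory_order_seq_cst); }
static inline void smp_mb_master(int group) { (void)group; atomic_thread_fence(memory_order_seq_cst); }

static atomic_int reader_ctr;   /* hypothetical stand-in for rcu_reader.ctr */
static atomic_int shared_val;   /* hypothetical RCU-protected data */

static void *reader_thread(void *arg)
{
	(void)arg;
	atomic_store_explicit(&reader_ctr, 1, memory_order_relaxed);
	smp_mb_slave(RCU_MB_GROUP);   /* counter store ordered before data access */
	int v = atomic_load_explicit(&shared_val, memory_order_relaxed);
	smp_mb_slave(RCU_MB_GROUP);   /* data access ordered before counter clear */
	atomic_store_explicit(&reader_ctr, 0, memory_order_relaxed);
	printf("reader saw %d\n", v);
	return NULL;
}

static void *updater_thread(void *arg)
{
	(void)arg;
	atomic_store_explicit(&shared_val, 42, memory_order_relaxed);
	smp_mb_master(RCU_MB_GROUP);  /* publish data before scanning reader state */
	while (atomic_load_explicit(&reader_ctr, memory_order_relaxed))
		;                     /* spin until the reader is quiescent */
	return NULL;
}

int main(void)
{
	pthread_t r, u;
	pthread_create(&r, NULL, reader_thread, NULL);
	pthread_create(&u, NULL, updater_thread, NULL);
	pthread_join(r, NULL);
	pthread_join(u, NULL);
	return 0;
}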
#ifdef RCU_MEMBARRIER
extern int has_sys_membarrier;
-static inline void smp_mb_light()
+static inline void smp_mb_slave(int group)
{
if (likely(has_sys_membarrier))
barrier();
#endif
#ifdef RCU_MB
-static inline void smp_mb_light()
+static inline void smp_mb_slave(int group)
{
smp_mb();
}
#endif
#ifdef RCU_SIGNAL
-static inline void smp_mb_light()
+static inline void smp_mb_slave(int group)
{
barrier();
}
_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
- * accessing the pointer. See smp_mb_heavy().
+ * accessing the pointer. See smp_mb_master().
*/
- smp_mb_light();
+ smp_mb_slave(RCU_MB_GROUP);
} else {
_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
}
tmp = rcu_reader.ctr;
/*
* Finish using rcu before decrementing the pointer.
- * See smp_mb_heavy().
+ * See smp_mb_master().
*/
if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
- smp_mb_light();
+ smp_mb_slave(RCU_MB_GROUP);
_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
/* write rcu_reader.ctr before read futex */
- smp_mb_light();
+ smp_mb_slave(RCU_MB_GROUP);
wake_up_gp();
} else {
_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
}
#ifdef RCU_MEMBARRIER
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
if (likely(has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
#endif
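For reference, membarrier() above is presumably a thin wrapper around the sys_membarrier system call proposed alongside this change: it executes a memory barrier on every running thread of the calling process, which is what lets smp_mb_slave() remain a plain compiler barrier when has_sys_membarrier is set. A hypothetical wrapper of that shape (the syscall number and the MEMBARRIER_EXPEDITED flag value come from the kernel-side patch, not from a released header):

#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical wrapper matching the call site above; __NR_membarrier and the
 * MEMBARRIER_EXPEDITED flag belong to the kernel-side sys_membarrier patch. */
static int membarrier(int flags)
{
	return syscall(__NR_membarrier, flags);
}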
#ifdef RCU_MB
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
smp_mb();
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
{
force_mb_all_readers();
}
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
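The gp_futex logic around wait_gp() is a futex sleep/wake handshake: the updater advertises that it is about to block by setting gp_futex to -1 (the master barrier orders that store before it re-reads reader counters), while a reader that clears its nesting count issues a slave barrier before reading the futex, so it cannot miss the -1 and skip the wake-up. A standalone sketch of the handshake with raw futex calls (gp_futex_demo, reader_ctr_demo and the helper names are hypothetical):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static atomic_int gp_futex_demo;   /* hypothetical stand-in for gp_futex */
static atomic_int reader_ctr_demo; /* hypothetical stand-in for rcu_reader.ctr */

/* Updater side: announce the intent to sleep, re-check, then block. */
static void updater_wait_demo(void)
{
	atomic_store_explicit(&gp_futex_demo, -1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* "write futex before read reader_gp" */
	if (atomic_load_explicit(&reader_ctr_demo, memory_order_relaxed) == 0) {
		/* Reader already quiescent: undo the announcement and return. */
		atomic_store_explicit(&gp_futex_demo, 0, memory_order_relaxed);
		return;
	}
	if (atomic_load_explicit(&gp_futex_demo, memory_order_relaxed) == -1)
		syscall(SYS_futex, &gp_futex_demo, FUTEX_WAIT, -1, NULL, NULL, 0);
}

/* Reader side: after clearing its counter, wake the updater if it sleeps. */
static void reader_wake_demo(void)
{
	atomic_store_explicit(&reader_ctr_demo, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* "write rcu_reader.ctr before read futex" */
	if (atomic_load_explicit(&gp_futex_demo, memory_order_relaxed) == -1) {
		atomic_store_explicit(&gp_futex_demo, 0, memory_order_relaxed);
		syscall(SYS_futex, &gp_futex_demo, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}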
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
}
list_for_each_entry_safe(index, tmp, &registry, head) {
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
wait_loops = 0;
break; /* only escape switch */
default:
* where new ptr points to. Must be done within rcu_gp_lock because it
* iterates on reader threads.*/
/* Write new ptr before changing the qparity */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
/*
* Wait for previous parity to be empty of readers.
/* Finish waiting for reader threads before letting the old ptr be
* freed. Must be done within rcu_gp_lock because it iterates on reader
* threads. */
- smp_mb_heavy();
+ smp_mb_master(RCU_MB_GROUP);
out:
mutex_unlock(&rcu_gp_lock);
}
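For orientation, the two update-side master barriers above sit at the entry and exit of the grace-period sequence. A hedged reconstruction of the enclosing function follows; update_counter_and_wait() stands for the wait loop shown earlier, and any name not visible in the hunks above is illustrative only:

/* Hedged sketch of the enclosing update-side function; update_counter_and_wait()
 * and anything else not visible in the hunks above is illustrative only. */
void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);

	if (list_empty(&registry))
		goto out;               /* no registered readers, nothing to wait for */

	/* Write the new pointer before changing the parity (first hunk above). */
	smp_mb_master(RCU_MB_GROUP);

	update_counter_and_wait();      /* flip parity, wait out previous-parity readers */
	smp_mb();                       /* keep the two wait phases ordered */
	update_counter_and_wait();      /* flip back, wait out the other parity */

	/* Finish waiting for readers before the old pointer may be freed (last hunk above). */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_gp_lock);
}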