_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
- * accessing the pointer. See force_mb_all_threads().
+ * accessing the pointer. See force_mb_all_readers().
*/
reader_barrier();
} else {
tmp = rcu_reader.ctr;
/*
* Finish using rcu before decrementing the pointer.
- * See force_mb_all_threads().
+ * See force_mb_all_readers().
*/
if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
reader_barrier();
}
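/*
 * Illustrative sketch, not part of the patch: a minimal reader using
 * the public liburcu API whose internals appear above. The outermost
 * rcu_read_lock() snapshots rcu_gp_ctr into rcu_reader.ctr; nested
 * calls only bump the RCU_GP_COUNT nesting field. struct mydata and
 * shared_ptr are hypothetical names introduced for the example.
 */
#include <urcu.h>

struct mydata { int value; };
static struct mydata *shared_ptr;	/* hypothetical shared data */

static int reader_peek(void)
{
	struct mydata *p;
	int v = 0;

	rcu_register_thread();	/* normally done once per thread */
	rcu_read_lock();	/* outermost: publishes rcu_reader.ctr */
	p = rcu_dereference(shared_ptr);
	if (p)
		v = p->value;
	rcu_read_unlock();	/* outermost: barrier, then drop count */
	rcu_unregister_thread();
	return v;
}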
#endif //0
-static void force_mb_all_threads(void)
+static void force_mb_all_readers(void)
{
smp_mb();
}
}
#endif //0
-static void force_mb_all_threads(void)
+static void force_mb_all_readers(void)
{
struct rcu_reader *index;
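	/*
	 * Illustrative reconstruction of the body this hunk elides: the
	 * signal-based flavor cannot assume readers issue real memory
	 * barriers, so it interrupts each registered reader with a signal
	 * whose handler executes smp_mb(), then waits for every
	 * acknowledgment. Names (need_mb, tid, SIGRCU,
	 * STORE_SHARED/LOAD_SHARED) follow the liburcu sources of the
	 * period; treat them as assumptions, not patch content.
	 */
	if (list_empty(&registry))
		return;
	list_for_each_entry(index, &registry, head) {
		STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);	/* handler runs smp_mb() */
	}
	list_for_each_entry(index, &registry, head) {
		while (LOAD_SHARED(index->need_mb))
			poll(NULL, 0, 1);	/* wait for the reader's ack */
	}
	smp_mb();	/* order the acks before the caller proceeds */
}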
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- force_mb_all_threads();
+ force_mb_all_readers();
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- force_mb_all_threads();
+ force_mb_all_readers();
}
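/*
 * Illustrative sketch, not part of the patch: the reader-side
 * counterpart of the gp_futex handshake above. Once the waiter has set
 * gp_futex to -1, the last exiting reader resets it and issues a
 * FUTEX_WAKE so wait_gp() returns. This mirrors liburcu's
 * wake_up_gp(); treat the exact form as a reconstruction.
 */
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex_async(&gp_futex, FUTEX_WAKE, 1,
				NULL, NULL, 0);
	}
}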
list_for_each_entry_safe(index, tmp, &registry, head) {
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- force_mb_all_threads();
+ force_mb_all_readers();
uatomic_set(&gp_futex, 0);
}
break;
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- force_mb_all_threads();
+ force_mb_all_readers();
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- force_mb_all_threads();
+ force_mb_all_readers();
wait_loops = 0;
break; /* only escape switch */
default:
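	/*
	 * Illustrative reconstruction of the polling loop the hunks above
	 * come from: the grace-period waiter moves quiescent readers off
	 * the registry, arms gp_futex and sleeps in wait_gp() after
	 * RCU_QS_ACTIVE_ATTEMPTS spins, and on incoherent-cache builds
	 * also kicks readers every KICK_READER_LOOPS iterations (the
	 * switch shown above). Helpers such as rcu_old_gp_ongoing() and
	 * the qsreaders list follow the liburcu sources of the period and
	 * should be read as assumptions. Coherent-cache variant:
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			force_mb_all_readers();
		}
		list_for_each_entry_safe(index, tmp, &registry, head) {
			if (!rcu_old_gp_ongoing(&index->ctr))
				list_move(&index->head, &qsreaders);
		}
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				force_mb_all_readers();
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			wait_gp();
		} else {
			cpu_relax();
		}
	}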
* where new ptr points to. Must be done within internal_rcu_lock
* because it iterates on reader threads.*/
/* Write new ptr before changing the qparity */
- force_mb_all_threads();
+ force_mb_all_readers();
switch_next_rcu_qparity(); /* 0 -> 1 */
/* Finish waiting for reader threads before letting the old ptr be
 * freed. Must be done within internal_rcu_lock because it iterates on
 * reader threads. */
- force_mb_all_threads();
+ force_mb_all_readers();
internal_rcu_unlock();
}
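/*
 * Illustrative sketch, not part of the patch: writer-side usage of the
 * function above. synchronize_rcu() switches the qparity between the
 * two force_mb_all_readers() calls shown and waits until no reader can
 * still hold the old pointer, after which freeing it is safe. struct
 * mydata and shared_ptr are the hypothetical names from the reader
 * sketch.
 */
#include <stdlib.h>

static void update_shared(struct mydata *newp)
{
	struct mydata *old = shared_ptr;

	rcu_assign_pointer(shared_ptr, newp);	/* publish the new version */
	synchronize_rcu();			/* wait for pre-existing readers */
	free(old);				/* old version now unreachable */
}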