#endif
#ifdef RCU_MB
-static inline void reader_barrier()
+static inline void smp_mb_light()
{
smp_mb();
}
#else
-static inline void reader_barrier()
+static inline void smp_mb_light()
{
barrier();
}
_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
- * accessing the pointer. See force_mb_all_readers().
+ * accessing the pointer. See smp_mb_heavy().
*/
- reader_barrier();
+ smp_mb_light();
} else {
_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
}
tmp = rcu_reader.ctr;
/*
* Finish using rcu before decrementing the pointer.
- * See force_mb_all_readers().
+ * See smp_mb_heavy().
*/
if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
- reader_barrier();
+ smp_mb_light();
_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
/* write rcu_reader.ctr before read futex */
- reader_barrier();
+ smp_mb_light();
wake_up_gp();
} else {
_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
}
#ifdef RCU_MB
-#if 0 /* unused */
-static void force_mb_single_thread(struct rcu_reader *index)
+static void smp_mb_heavy()
{
smp_mb();
}
-#endif //0
-
-static void force_mb_all_readers(void)
-{
- smp_mb();
-}
-#else /* #ifdef RCU_MB */
-#if 0 /* unused */
-static void force_mb_single_thread(struct rcu_reader *index)
-{
- assert(!list_empty(&registry));
- /*
- * pthread_kill has a smp_mb(). But beware, we assume it performs
- * a cache flush on architectures with non-coherent cache. Let's play
- * safe and don't assume anything : we use smp_mc() to make sure the
- * cache flush is enforced.
- */
- index->need_mb = 1;
- smp_mc(); /* write ->need_mb before sending the signals */
- pthread_kill(index->tid, SIGRCU);
- smp_mb();
- /*
- * Wait for sighandler (and thus mb()) to execute on every thread.
- * BUSY-LOOP.
- */
- while (index->need_mb) {
- poll(NULL, 0, 1);
- }
- smp_mb(); /* read ->need_mb before ending the barrier */
-}
-#endif //0
-
+#else
static void force_mb_all_readers(void)
{
struct rcu_reader *index;
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
+
+static void smp_mb_heavy()
+{
+ force_mb_all_readers();
+}
#endif /* #else #ifdef RCU_MB */
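
To make the two barrier flavors concrete, here is a minimal, self-contained sketch (not the liburcu sources) of how a compile-time RCU_MB switch can select between a full memory barrier and a plain compiler barrier. The GCC builtins used for the barriers and the main() driver are assumptions added for illustration, and the signal-based smp_mb_heavy() is reduced to a local barrier instead of signalling every registered reader as force_mb_all_readers() does above.

#include <stdio.h>

#ifdef RCU_MB
/* Memory-barrier flavor: both the read-side (light) and update-side
 * (heavy) barriers are real memory barriers. */
static inline void smp_mb_light(void)
{
	__sync_synchronize();			/* full memory barrier */
}

static void smp_mb_heavy(void)
{
	__sync_synchronize();
}
#else
/* Signal flavor: the read-side barrier is only a compiler barrier;
 * the heavy side stands in for force_mb_all_readers(). */
static inline void smp_mb_light(void)
{
	__asm__ __volatile__ ("" ::: "memory");	/* compiler barrier only */
}

static void smp_mb_heavy(void)
{
	__sync_synchronize();	/* placeholder for signalling all readers */
}
#endif

int main(void)
{
	smp_mb_light();
	smp_mb_heavy();
#ifdef RCU_MB
	printf("RCU_MB flavor: light and heavy are both smp_mb()\n");
#else
	printf("signal flavor: light is a compiler barrier only\n");
#endif
	return 0;
}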
/*
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- force_mb_all_readers();
+ smp_mb_heavy();
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- force_mb_all_readers();
+ smp_mb_heavy();
}
list_for_each_entry_safe(index, tmp, &registry, head) {
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- force_mb_all_readers();
+ smp_mb_heavy();
uatomic_set(&gp_futex, 0);
}
break;
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- force_mb_all_readers();
+ smp_mb_heavy();
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- force_mb_all_readers();
+ smp_mb_heavy();
wait_loops = 0;
break; /* only escape switch */
default:
* where new ptr points to. Must be done within internal_rcu_lock
* because it iterates on reader threads.*/
/* Write new ptr before changing the qparity */
- force_mb_all_readers();
+ smp_mb_heavy();
switch_next_rcu_qparity(); /* 0 -> 1 */
/* Finish waiting for reader threads before letting the old ptr be
* freed. Must be done within internal_rcu_lock because it iterates on
* reader threads. */
- force_mb_all_readers();
+ smp_mb_heavy();
internal_rcu_unlock();
}
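
For reference, a compressed sketch of the update-side ordering implied by the last hunk, with the elided liburcu internals (the mutex, the parity word, the reader wait loop) replaced by labelled stand-ins; only the placement of smp_mb_heavy() around the parity switch and before the unlock is taken from the patch, everything else is assumed for the sake of a compilable example.

#include <pthread.h>

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long rcu_gp_ctr;	/* stand-in for the global parity/count word */

static void smp_mb_heavy(void)
{
	__sync_synchronize();		/* stand-in for the heavy barrier */
}

static void switch_next_rcu_qparity(void)
{
	rcu_gp_ctr ^= 1UL;		/* stand-in: flip the parity bit */
}

static void wait_for_readers(void)
{
	/* stand-in: the busy-wait/futex loop over the registry is elided */
}

void synchronize_rcu_sketch(void)
{
	pthread_mutex_lock(&rcu_gp_lock);	/* internal_rcu_lock() */

	/* Write new ptr before changing the qparity. */
	smp_mb_heavy();

	switch_next_rcu_qparity();		/* 0 -> 1 */
	wait_for_readers();

	/* Finish waiting for readers before the old ptr may be freed. */
	smp_mb_heavy();

	pthread_mutex_unlock(&rcu_gp_lock);	/* internal_rcu_unlock() */
}

int main(void)
{
	synchronize_rcu_sketch();
	return 0;
}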