From: Mathieu Desnoyers
Date: Wed, 13 Jan 2010 16:59:00 +0000 (-0500)
Subject: urcu signal-based renames
X-Git-Tag: v0.4.0~10
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=9d7e3f89772f08cca26d727f47d44ecd47c94401;p=urcu.git

urcu signal-based renames

reader_barrier renamed to smp_mb_light
force_mb_all_readers renamed to smp_mb_heavy

Signed-off-by: Mathieu Desnoyers
---

diff --git a/urcu-static.h b/urcu-static.h
index 0708df7..d466131 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -136,12 +136,12 @@ static inline void debug_yield_init(void)
 #endif
 
 #ifdef RCU_MB
-static inline void reader_barrier()
+static inline void smp_mb_light()
 {
 	smp_mb();
 }
 #else
-static inline void reader_barrier()
+static inline void smp_mb_light()
 {
 	barrier();
 }
@@ -216,9 +216,9 @@ static inline void _rcu_read_lock(void)
 		_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
 		/*
 		 * Set active readers count for outermost nesting level before
-		 * accessing the pointer. See force_mb_all_readers().
+		 * accessing the pointer. See smp_mb_heavy().
 		 */
-		reader_barrier();
+		smp_mb_light();
 	} else {
 		_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
 	}
@@ -231,13 +231,13 @@ static inline void _rcu_read_unlock(void)
 	tmp = rcu_reader.ctr;
 	/*
 	 * Finish using rcu before decrementing the pointer.
-	 * See force_mb_all_readers().
+	 * See smp_mb_heavy().
 	 */
 	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-		reader_barrier();
+		smp_mb_light();
 		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
 		/* write rcu_reader.ctr before read futex */
-		reader_barrier();
+		smp_mb_light();
 		wake_up_gp();
 	} else {
 		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
diff --git a/urcu.c b/urcu.c
index 3132e59..53dfbd5 100644
--- a/urcu.c
+++ b/urcu.c
@@ -119,43 +119,11 @@ static void switch_next_rcu_qparity(void)
 }
 
 #ifdef RCU_MB
-#if 0 /* unused */
-static void force_mb_single_thread(struct rcu_reader *index)
+static void smp_mb_heavy()
 {
 	smp_mb();
 }
-#endif //0
-
-static void force_mb_all_readers(void)
-{
-	smp_mb();
-}
-#else /* #ifdef RCU_MB */
-#if 0 /* unused */
-static void force_mb_single_thread(struct rcu_reader *index)
-{
-	assert(!list_empty(&registry));
-	/*
-	 * pthread_kill has a smp_mb(). But beware, we assume it performs
-	 * a cache flush on architectures with non-coherent cache. Let's play
-	 * safe and don't assume anything : we use smp_mc() to make sure the
-	 * cache flush is enforced.
-	 */
-	index->need_mb = 1;
-	smp_mc();	/* write ->need_mb before sending the signals */
-	pthread_kill(index->tid, SIGRCU);
-	smp_mb();
-	/*
-	 * Wait for sighandler (and thus mb()) to execute on every thread.
-	 * BUSY-LOOP.
-	 */
-	while (index->need_mb) {
-		poll(NULL, 0, 1);
-	}
-	smp_mb();	/* read ->need_mb before ending the barrier */
-}
-#endif //0
-
+#else
 static void force_mb_all_readers(void)
 {
 	struct rcu_reader *index;
@@ -198,6 +166,11 @@ static void force_mb_all_readers(void)
 	}
 	smp_mb();	/* read ->need_mb before ending the barrier */
 }
+
+static void smp_mb_heavy()
+{
+	force_mb_all_readers();
+}
 #endif /* #else #ifdef RCU_MB */
 
 /*
@@ -206,7 +179,7 @@ static void force_mb_all_readers(void)
 static void wait_gp(void)
 {
 	/* Read reader_gp before read futex */
-	force_mb_all_readers();
+	smp_mb_heavy();
 	if (uatomic_read(&gp_futex) == -1)
 		futex_async(&gp_futex, FUTEX_WAIT, -1,
 		      NULL, NULL, 0);
@@ -228,7 +201,7 @@ void wait_for_quiescent_state(void)
 		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
 			uatomic_dec(&gp_futex);
 			/* Write futex before read reader_gp */
-			force_mb_all_readers();
+			smp_mb_heavy();
 		}
 
 		list_for_each_entry_safe(index, tmp, &registry, head) {
@@ -240,7 +213,7 @@ void wait_for_quiescent_state(void)
 		if (list_empty(&registry)) {
 			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
-				force_mb_all_readers();
+				smp_mb_heavy();
 				uatomic_set(&gp_futex, 0);
 			}
 			break;
@@ -258,7 +231,7 @@ void wait_for_quiescent_state(void)
 		if (list_empty(&registry)) {
 			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
-				force_mb_all_readers();
+				smp_mb_heavy();
 				uatomic_set(&gp_futex, 0);
 			}
 			break;
@@ -268,7 +241,7 @@ void wait_for_quiescent_state(void)
 			wait_gp();
 			break; /* only escape switch */
 		case KICK_READER_LOOPS:
-			force_mb_all_readers();
+			smp_mb_heavy();
 			wait_loops = 0;
 			break; /* only escape switch */
 		default:
@@ -289,7 +262,7 @@ void synchronize_rcu(void)
 	 * where new ptr points to. Must be done within internal_rcu_lock
 	 * because it iterates on reader threads.*/
 	/* Write new ptr before changing the qparity */
-	force_mb_all_readers();
+	smp_mb_heavy();
 
 	switch_next_rcu_qparity();	/* 0 -> 1 */
 
@@ -353,7 +326,7 @@ void synchronize_rcu(void)
 	/* Finish waiting for reader threads before letting the old ptr being
 	 * freed. Must be done within internal_rcu_lock because it iterates on
 	 * reader threads. */
-	force_mb_all_readers();
+	smp_mb_heavy();
 
 	internal_rcu_unlock();
 }
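
Context for the rename (not part of the patch): in the signal-based flavour, smp_mb_light() compiles down to a compiler barrier() on the reader fast path, while the updater-side smp_mb_heavy() falls back to force_mb_all_readers(), which signals every registered reader so that the matching memory barriers execute there. The sketch below shows where the two ends meet from an application's point of view. It assumes the public liburcu API of this era (urcu.h, rcu_register_thread(), rcu_read_lock()/rcu_read_unlock(), rcu_dereference(), rcu_assign_pointer(), synchronize_rcu()); it is illustrative only and not code from this commit.

/*
 * Minimal usage sketch (assumed public API, see note above).
 * Reader side: rcu_read_lock()/rcu_read_unlock() issue smp_mb_light()
 * internally, so reads stay cheap.
 * Updater side: synchronize_rcu() issues smp_mb_heavy(), forcing the
 * pairing barriers on every registered reader before old data is freed.
 */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>

static int *shared_ptr;

static void *reader(void *arg)
{
	int i;

	rcu_register_thread();		/* join the reader registry */
	for (i = 0; i < 1000; i++) {
		int *p;

		rcu_read_lock();	/* smp_mb_light() on this path */
		p = rcu_dereference(shared_ptr);
		if (p)
			(void) *p;	/* p stays valid until unlock */
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

/* Single updater: publish a new value, then reclaim the old one. */
static void update(int val)
{
	int *newp = malloc(sizeof(*newp));
	int *oldp = shared_ptr;

	*newp = val;
	rcu_assign_pointer(shared_ptr, newp);
	synchronize_rcu();		/* smp_mb_heavy() + grace period */
	free(oldp);			/* no reader can still hold oldp */
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader, NULL);
	update(42);
	update(43);
	pthread_join(tid, NULL);
	free(shared_ptr);
	return 0;
}

A build of this sketch would link against the signal-based liburcu flavour; the exact library name and flags are an assumption, not something this patch changes.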