From 8b25e300d2b440c75397448444d2c37fa0634a12 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Sat, 26 Sep 2009 13:55:30 -0400
Subject: [PATCH] Only make the threads for which we are waiting call
 sched_yield()

Signed-off-by: Mathieu Desnoyers
---
 urcu-qsbr-static.h | 32 +++++++++++++++++++++-----------
 urcu-qsbr.c        | 28 ++++++++++++++++------------
 urcu-static.h      | 26 +++++++++++++++++++-------
 urcu.c             | 21 +++++++++++++--------
 4 files changed, 69 insertions(+), 38 deletions(-)

diff --git a/urcu-qsbr-static.h b/urcu-qsbr-static.h
index c39ab81..1cb4246 100644
--- a/urcu-qsbr-static.h
+++ b/urcu-qsbr-static.h
@@ -98,7 +98,7 @@
 
 /*
  * If a reader is really non-cooperative and refuses to commit its
- * rcu_reader_qs_gp count to memory (there is no barrier in the reader
+ * rcu_reader qs_gp count to memory (there is no barrier in the reader
  * per-se), kick it after a few loops waiting for it.
  */
 #define KICK_READER_LOOPS 10000
@@ -178,7 +178,12 @@ static inline void reader_barrier()
  */
 extern unsigned long urcu_gp_ctr;
 
-extern unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status {
+	unsigned long qs_gp;
+	unsigned long gp_waiting;
+};
+
+extern struct urcu_reader_status __thread urcu_reader_status;
 
 #if (BITS_PER_LONG < 64)
 static inline int rcu_gp_ongoing(unsigned long *value)
@@ -204,7 +209,7 @@ static inline int rcu_gp_ongoing(unsigned long *value)
 
 static inline void _rcu_read_lock(void)
 {
-	rcu_assert(rcu_reader_qs_gp);
+	rcu_assert(urcu_reader_status.qs_gp);
 }
 
 static inline void _rcu_read_unlock(void)
@@ -216,31 +221,36 @@ static inline void _rcu_quiescent_state(void)
 	long gp_ctr;
 
 	smp_mb();
-	gp_ctr = LOAD_SHARED(urcu_gp_ctr);
-	if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
+	/*
+	 * volatile accesses can be reordered by the compiler when put in the
+	 * same expression.
+	 */
+	if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
+	    unlikely(urcu_reader_status.gp_waiting)) {
+		_STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
 		sched_yield();
-		gp_ctr = LOAD_SHARED(urcu_gp_ctr);
+	} else {
+		_STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
 	}
-	_STORE_SHARED(rcu_reader_qs_gp, gp_ctr);
 	smp_mb();
 }
 
 static inline void _rcu_thread_offline(void)
 {
 	smp_mb();
-	STORE_SHARED(rcu_reader_qs_gp, 0);
+	STORE_SHARED(urcu_reader_status.qs_gp, 0);
 }
 
 static inline void _rcu_thread_online(void)
 {
 	long gp_ctr;
 
-	gp_ctr = LOAD_SHARED(urcu_gp_ctr);
-	if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
+	if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
+	    unlikely(urcu_reader_status.gp_waiting)) {
 		sched_yield();
 		gp_ctr = LOAD_SHARED(urcu_gp_ctr);
 	}
-	_STORE_SHARED(rcu_reader_qs_gp, gp_ctr);
+	_STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
 	smp_mb();
 }
 
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index b42d7c4..8828ae8 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -48,14 +48,14 @@ unsigned long urcu_gp_ctr = RCU_GP_ONLINE;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status __thread urcu_reader_status;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
 	pthread_t tid;
-	unsigned long *rcu_reader_qs_gp;
+	struct urcu_reader_status *urcu_reader_status;
 };
 
 #ifdef DEBUG_YIELD
@@ -109,12 +109,13 @@ static void wait_for_quiescent_state(void)
 	if (!registry)
 		return;
 	/*
-	 * Wait for each thread rcu_reader_qs_gp count to become 0.
+	 * Wait for each thread rcu_reader qs_gp count to become 0.
 	 */
 	for (index = registry; index < registry + num_readers; index++) {
 		int wait_loops = 0;
 
-		while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
+		index->urcu_reader_status->gp_waiting = 1;
+		while (rcu_gp_ongoing(&index->urcu_reader_status->qs_gp)) {
 			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
 				sched_yield();	/* ideally sched_yield_to() */
 			} else {
@@ -125,6 +126,7 @@ static void wait_for_quiescent_state(void)
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
 			}
 		}
+		index->urcu_reader_status->gp_waiting = 0;
 	}
 }
 
@@ -146,7 +148,7 @@ void synchronize_rcu(void)
 {
 	unsigned long was_online;
 
-	was_online = rcu_reader_qs_gp;
+	was_online = urcu_reader_status.qs_gp;
 
 	/* All threads should read qparity before accessing data structure
 	 * where new ptr points to.
@@ -160,7 +162,7 @@ void synchronize_rcu(void)
 	 * threads registered as readers.
 	 */
 	if (was_online)
-		STORE_SHARED(rcu_reader_qs_gp, 0);
+		STORE_SHARED(urcu_reader_status.qs_gp, 0);
 
 	internal_urcu_lock();
 
@@ -213,7 +215,8 @@ void synchronize_rcu(void)
 	 * freed.
 	 */
 	if (was_online)
-		_STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+		_STORE_SHARED(urcu_reader_status.qs_gp,
+			      LOAD_SHARED(urcu_gp_ctr));
 	smp_mb();
 }
 #else /* !(BITS_PER_LONG < 64) */
@@ -221,7 +224,7 @@ void synchronize_rcu(void)
 {
 	unsigned long was_online;
 
-	was_online = rcu_reader_qs_gp;
+	was_online = urcu_reader_status.qs_gp;
 
 	/*
 	 * Mark the writer thread offline to make sure we don't wait for
@@ -230,7 +233,7 @@ void synchronize_rcu(void)
 	 */
 	smp_mb();
 	if (was_online)
-		STORE_SHARED(rcu_reader_qs_gp, 0);
+		STORE_SHARED(urcu_reader_status.qs_gp, 0);
 
 	internal_urcu_lock();
 	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
@@ -240,7 +243,8 @@ void synchronize_rcu(void)
 	internal_urcu_unlock();
 
 	if (was_online)
-		_STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+		_STORE_SHARED(urcu_reader_status.qs_gp,
+			      LOAD_SHARED(urcu_gp_ctr));
 	smp_mb();
 }
 #endif /* !(BITS_PER_LONG < 64) */
@@ -327,7 +331,7 @@ static void rcu_add_reader(pthread_t id)
 	}
 	registry[num_readers].tid = id;
 	/* reference to the TLS of _this_ reader thread. */
-	registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
+	registry[num_readers].urcu_reader_status = &urcu_reader_status;
 	num_readers++;
 }
 
@@ -345,7 +349,7 @@ static void rcu_remove_reader(pthread_t id)
 			memcpy(index, &registry[num_readers - 1],
 				sizeof(struct reader_registry));
 			registry[num_readers - 1].tid = 0;
-			registry[num_readers - 1].rcu_reader_qs_gp = NULL;
+			registry[num_readers - 1].urcu_reader_status = NULL;
 			num_readers--;
 			return;
 		}
diff --git a/urcu-static.h b/urcu-static.h
index 7bde5ba..efb8225 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -214,7 +214,12 @@ static inline void reader_barrier()
  */
 extern long urcu_gp_ctr;
 
-extern long __thread urcu_active_readers;
+struct urcu_reader_status {
+	long active_readers;
+	long gp_waiting;
+};
+
+extern struct urcu_reader_status __thread urcu_reader_status;
 
 static inline int rcu_old_gp_ongoing(long *value)
 {
@@ -235,22 +240,28 @@ static inline void _rcu_read_lock(void)
 {
	long tmp, gp_ctr;
 
-	tmp = urcu_active_readers;
+	tmp = urcu_reader_status.active_readers;
 	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-		gp_ctr = _LOAD_SHARED(urcu_gp_ctr);
-		if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
+		/*
+		 * volatile accesses can be reordered and optimized when within
+		 * the same statement.
+		 */
+		if (unlikely((gp_ctr = _LOAD_SHARED(urcu_gp_ctr))
+				& RCU_GP_ONGOING) &&
+		    unlikely(LOAD_SHARED(urcu_reader_status.gp_waiting))) {
 			sched_yield();
 			gp_ctr = _LOAD_SHARED(urcu_gp_ctr);
 		}
-		_STORE_SHARED(urcu_active_readers, gp_ctr);
+		_STORE_SHARED(urcu_reader_status.active_readers, gp_ctr);
 		/*
 		 * Set active readers count for outermost nesting level before
 		 * accessing the pointer. See force_mb_all_threads().
 		 */
 		reader_barrier();
 	} else {
-		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
+		_STORE_SHARED(urcu_reader_status.active_readers,
+			      tmp + RCU_GP_COUNT);
 	}
 }
 
@@ -264,7 +275,8 @@ static inline void _rcu_read_unlock(void)
 	 * (no nesting).
 	 */
 	reader_barrier();
-	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
+	_STORE_SHARED(urcu_reader_status.active_readers,
+		      urcu_reader_status.active_readers - RCU_GP_COUNT);
 }
 
 /**
diff --git a/urcu.c b/urcu.c
index d960497..a9ebf59 100644
--- a/urcu.c
+++ b/urcu.c
@@ -61,14 +61,14 @@ long urcu_gp_ctr = RCU_GP_COUNT;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-long __thread urcu_active_readers;
+struct urcu_reader_status __thread urcu_reader_status;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
 	pthread_t tid;
-	long *urcu_active_readers;
+	struct urcu_reader_status *urcu_reader_status;
 	char *need_mb;
 };
 
@@ -215,12 +215,15 @@ void wait_for_quiescent_state(void)
 	if (!registry)
 		return;
 	/*
-	 * Wait for each thread urcu_active_readers count to become 0.
+	 * Wait for each thread active_readers count to become 0.
 	 */
 	for (index = registry; index < registry + num_readers; index++) {
 		int wait_loops = 0;
+
+		index->urcu_reader_status->gp_waiting = 1;
 #ifndef HAS_INCOHERENT_CACHES
-		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+		while (rcu_old_gp_ongoing(
+				&index->urcu_reader_status->active_readers)) {
 			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
 				sched_yield();	/* ideally sched_yield_to() */
 			} else {
@@ -230,9 +233,10 @@ void wait_for_quiescent_state(void)
 #else /* #ifndef HAS_INCOHERENT_CACHES */
 		/*
 		 * BUSY-LOOP. Force the reader thread to commit its
-		 * urcu_active_readers update to memory if we wait for too long.
+		 * active_readers update to memory if we wait for too long.
 		 */
-		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+		while (rcu_old_gp_ongoing(
+				&index->urcu_reader_status->active_readers)) {
 			switch (wait_loops++) {
 			case RCU_QS_ACTIVE_ATTEMPTS:
 				sched_yield();	/* ideally sched_yield_to() */
@@ -246,6 +250,7 @@ void wait_for_quiescent_state(void)
 			}
 		}
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+		index->urcu_reader_status->gp_waiting = 0;
 	}
 }
 
@@ -397,7 +402,7 @@ static void rcu_add_reader(pthread_t id)
 	}
 	registry[num_readers].tid = id;
 	/* reference to the TLS of _this_ reader thread. */
-	registry[num_readers].urcu_active_readers = &urcu_active_readers;
+	registry[num_readers].urcu_reader_status = &urcu_reader_status;
 	registry[num_readers].need_mb = &need_mb;
 	num_readers++;
 }
@@ -416,7 +421,7 @@ static void rcu_remove_reader(pthread_t id)
 			memcpy(index, &registry[num_readers - 1],
 				sizeof(struct reader_registry));
 			registry[num_readers - 1].tid = 0;
-			registry[num_readers - 1].urcu_active_readers = NULL;
+			registry[num_readers - 1].urcu_reader_status = NULL;
 			num_readers--;
 			return;
 		}
--
2.34.1
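For readers following the diff, here is a minimal, self-contained sketch of the handshake the patch introduces on the QSBR flavour. It is not the library code: it uses C11 atomics as a stand-in for the library's LOAD_SHARED/STORE_SHARED/smp_mb primitives, models a single registered reader, and simplifies grace-period counter management. Only the gp_waiting protocol itself mirrors the patch: the writer raises the flag around its wait loop, and the reader yields only when a grace period is ongoing AND the writer is actually waiting on it.

/* gp_waiting handshake sketch; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define RCU_GP_ONGOING	1UL	/* a grace period is in progress */
#define RCU_GP_CTR	2UL	/* counter increment per grace period */

static atomic_ulong gp_ctr = RCU_GP_CTR;
static atomic_int stop;

struct urcu_reader_status {
	atomic_ulong qs_gp;	 /* last gp_ctr value this reader published */
	atomic_ulong gp_waiting; /* writer is spinning on this reader */
};

static struct urcu_reader_status reader_status;

/* Reader: publish a quiescent state; yield only if the writer waits. */
static void rcu_quiescent_state(void)
{
	unsigned long ctr = atomic_load(&gp_ctr);

	atomic_store(&reader_status.qs_gp, ctr);
	if ((ctr & RCU_GP_ONGOING) && atomic_load(&reader_status.gp_waiting))
		sched_yield();	/* donate the timeslice to the writer */
}

static void *reader_thread(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop))
		rcu_quiescent_state();	/* read-side work would go here */
	return NULL;
}

/* Writer: raise gp_waiting only around the wait loop, as the patch does. */
static void synchronize(void)
{
	unsigned long target;

	atomic_fetch_or(&gp_ctr, RCU_GP_ONGOING);	/* begin grace period */
	target = atomic_fetch_add(&gp_ctr, RCU_GP_CTR) + RCU_GP_CTR;

	atomic_store(&reader_status.gp_waiting, 1);
	while (atomic_load(&reader_status.qs_gp) != target)
		sched_yield();	/* ideally sched_yield_to() */
	atomic_store(&reader_status.gp_waiting, 0);

	atomic_fetch_and(&gp_ctr, ~RCU_GP_ONGOING);	/* end grace period */
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader_thread, NULL);
	for (int i = 0; i < 1000; i++)
		synchronize();
	atomic_store(&stop, 1);
	pthread_join(tid, NULL);
	printf("1000 grace periods completed\n");
	return 0;
}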
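The mb flavour side (urcu-static.h/urcu.c) applies the same idea inside _rcu_read_lock(), where only the outermost nesting level samples urcu_gp_ctr. A companion sketch follows, again under C11 atomics; the bit layout of the constants is illustrative (the real RCU_GP_COUNT/RCU_GP_CTR_NEST_MASK values differ) and the reader_barrier() calls are reduced to a comment.

/* Nestable read-lock sketch: the yield decision happens only at the
 * outermost nesting level. Single-threaded demo, no writer running. */
#include <assert.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define RCU_GP_ONGOING		1L	/* illustrative bit layout */
#define RCU_GP_COUNT		2L
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_COUNT * 0xffffL)

static atomic_long gp_ctr = RCU_GP_COUNT;

struct urcu_reader_status {
	atomic_long active_readers;
	atomic_long gp_waiting;
};

static _Thread_local struct urcu_reader_status urcu_reader_status;

static void rcu_read_lock(void)
{
	long tmp = atomic_load(&urcu_reader_status.active_readers);

	if (!(tmp & RCU_GP_CTR_NEST_MASK)) {
		/* Outermost lock: snapshot gp_ctr; yield only when a grace
		 * period is ongoing and the writer flagged this thread. */
		long ctr = atomic_load(&gp_ctr);

		if ((ctr & RCU_GP_ONGOING) &&
		    atomic_load(&urcu_reader_status.gp_waiting)) {
			sched_yield();
			ctr = atomic_load(&gp_ctr);
		}
		atomic_store(&urcu_reader_status.active_readers, ctr);
		/* the library issues reader_barrier() here */
	} else {
		/* Nested lock: bump the nesting count, nothing else. */
		atomic_store(&urcu_reader_status.active_readers,
			     tmp + RCU_GP_COUNT);
	}
}

static void rcu_read_unlock(void)
{
	atomic_fetch_sub(&urcu_reader_status.active_readers, RCU_GP_COUNT);
}

int main(void)
{
	rcu_read_lock();
	rcu_read_lock();	/* nested: no gp_ctr access, never yields */
	rcu_read_unlock();
	rcu_read_unlock();
	assert(!(atomic_load(&urcu_reader_status.active_readers) &
		 RCU_GP_CTR_NEST_MASK));
	printf("nesting balanced\n");
	return 0;
}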
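The behavioural change both sketches illustrate: before this patch, any reader that observed RCU_GP_ONGOING called sched_yield() unconditionally, so every reader paid a yield on every grace period whether or not the writer still needed it. With the per-reader gp_waiting flag, the writer raises the flag only around the wait loop for the reader it is currently spinning on, so only the threads for which we are waiting donate their timeslice, as the subject line says. Removing the gp_waiting test from either sketch restores the old always-yield behaviour, which makes the difference straightforward to compare under load.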