Only make the threads for which we are waiting call sched_yield()
author     Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
           Sat, 26 Sep 2009 17:55:30 +0000 (13:55 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
           Sat, 26 Sep 2009 17:55:30 +0000 (13:55 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu-qsbr-static.h
urcu-qsbr.c
urcu-static.h
urcu.c

diff --git a/urcu-qsbr-static.h b/urcu-qsbr-static.h
index c39ab817727f4ad6c28c68a4a890b9f7ddcea7a9..1cb424664cbaf0502ea36d7c687f1818024d2456 100644
--- a/urcu-qsbr-static.h
+++ b/urcu-qsbr-static.h
@@ -98,7 +98,7 @@
 
 /*
  * If a reader is really non-cooperative and refuses to commit its
- * rcu_reader_qs_gp count to memory (there is no barrier in the reader
+ * urcu_reader_status.qs_gp count to memory (there is no barrier in the reader
  * per-se), kick it after a few loops waiting for it.
  */
 #define KICK_READER_LOOPS 10000
@@ -178,7 +178,12 @@ static inline void reader_barrier()
  */
 extern unsigned long urcu_gp_ctr;
 
-extern unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status {
+       unsigned long qs_gp;
+       unsigned long gp_waiting;
+};
+
+extern struct urcu_reader_status __thread urcu_reader_status;
 
 #if (BITS_PER_LONG < 64)
 static inline int rcu_gp_ongoing(unsigned long *value)
@@ -204,7 +209,7 @@ static inline int rcu_gp_ongoing(unsigned long *value)
 
 static inline void _rcu_read_lock(void)
 {
-       rcu_assert(rcu_reader_qs_gp);
+       rcu_assert(urcu_reader_status.qs_gp);
 }
 
 static inline void _rcu_read_unlock(void)
@@ -216,31 +221,36 @@ static inline void _rcu_quiescent_state(void)
        long gp_ctr;
 
        smp_mb();
-       gp_ctr = LOAD_SHARED(urcu_gp_ctr);
-       if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
+       /*
+        * Volatile accesses can be reordered by the compiler when they
+        * appear within the same expression.
+        */
+       if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
+           unlikely(urcu_reader_status.gp_waiting)) {
+               _STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
                sched_yield();
-               gp_ctr = LOAD_SHARED(urcu_gp_ctr);
+       } else {
+               _STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
        }
-       _STORE_SHARED(rcu_reader_qs_gp, gp_ctr);
        smp_mb();
 }
 
 static inline void _rcu_thread_offline(void)
 {
        smp_mb();
-       STORE_SHARED(rcu_reader_qs_gp, 0);
+       STORE_SHARED(urcu_reader_status.qs_gp, 0);
 }
 
 static inline void _rcu_thread_online(void)
 {
        long gp_ctr;
 
-       gp_ctr = LOAD_SHARED(urcu_gp_ctr);
-       if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
+       if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
+           unlikely(urcu_reader_status.gp_waiting)) {
                sched_yield();
                gp_ctr = LOAD_SHARED(urcu_gp_ctr);
        }
-       _STORE_SHARED(rcu_reader_qs_gp, gp_ctr);
+       _STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
        smp_mb();
 }
 
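The urcu-qsbr-static.h change above reduces to one rule: a reader entering a
quiescent state publishes its counter snapshot, and calls sched_yield() only
when a writer has flagged it through gp_waiting. Below is a minimal
stand-alone sketch of that pattern, not the library code itself:
LOAD_SHARED/STORE_SHARED are reduced to plain volatile accesses, the
RCU_GP_ONGOING bit position is an assumption, and the memory barriers and
reader kick logic are elided.

#include <sched.h>

#define RCU_GP_ONGOING	(1UL << 1)	/* bit position assumed for the sketch */

#define LOAD_SHARED(x)		(*(volatile __typeof__(x) *)&(x))
#define STORE_SHARED(x, v)	((*(volatile __typeof__(x) *)&(x)) = (v))

struct urcu_reader_status {
	unsigned long qs_gp;		/* last grace period seen, 0 if offline */
	unsigned long gp_waiting;	/* set by a writer waiting on this reader */
};

unsigned long urcu_gp_ctr = 1UL;	/* RCU_GP_ONLINE stand-in */
struct urcu_reader_status __thread urcu_reader_status;

static inline void quiescent_state_sketch(void)
{
	unsigned long gp_ctr;

	/* smp_mb() belongs here in the real code. */

	/* Two separate volatile loads, never merged into one expression. */
	gp_ctr = LOAD_SHARED(urcu_gp_ctr);
	if ((gp_ctr & RCU_GP_ONGOING) &&
	    LOAD_SHARED(urcu_reader_status.gp_waiting)) {
		/* A writer waits on us: publish the snapshot, then yield. */
		STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
		sched_yield();
	} else {
		/* No writer is waiting for this thread: no yield needed. */
		STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
	}

	/* smp_mb() belongs here in the real code. */
}
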
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index b42d7c4a42e3a4aa4e334ca457066327e969a85a..8828ae8ce30ed1657607a835f6a9459440a7fd21 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -48,14 +48,14 @@ unsigned long urcu_gp_ctr = RCU_GP_ONLINE;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status __thread urcu_reader_status;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
        pthread_t tid;
-       unsigned long *rcu_reader_qs_gp;
+       struct urcu_reader_status *urcu_reader_status;
 };
 
 #ifdef DEBUG_YIELD
@@ -109,12 +109,13 @@ static void wait_for_quiescent_state(void)
        if (!registry)
                return;
        /*
-        * Wait for each thread rcu_reader_qs_gp count to become 0.
+        * Wait for each thread's urcu_reader_status.qs_gp count to become 0.
         */
        for (index = registry; index < registry + num_readers; index++) {
                int wait_loops = 0;
 
-               while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
+               index->urcu_reader_status->gp_waiting = 1;
+               while (rcu_gp_ongoing(&index->urcu_reader_status->qs_gp)) {
                        if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
                                sched_yield();  /* ideally sched_yield_to() */
                        } else {
@@ -125,6 +126,7 @@ static void wait_for_quiescent_state(void)
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
                        }
                }
+               index->urcu_reader_status->gp_waiting = 0;
        }
 }
 
@@ -146,7 +148,7 @@ void synchronize_rcu(void)
 {
        unsigned long was_online;
 
-       was_online = rcu_reader_qs_gp;
+       was_online = urcu_reader_status.qs_gp;
 
        /* All threads should read qparity before accessing data structure
         * where new ptr points to.
@@ -160,7 +162,7 @@ void synchronize_rcu(void)
         * threads registered as readers.
         */
        if (was_online)
-               STORE_SHARED(rcu_reader_qs_gp, 0);
+               STORE_SHARED(urcu_reader_status.qs_gp, 0);
 
        internal_urcu_lock();
 
@@ -213,7 +215,8 @@ void synchronize_rcu(void)
         * freed.
         */
        if (was_online)
-               _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+               _STORE_SHARED(urcu_reader_status.qs_gp,
+                             LOAD_SHARED(urcu_gp_ctr));
        smp_mb();
 }
 #else /* !(BITS_PER_LONG < 64) */
@@ -221,7 +224,7 @@ void synchronize_rcu(void)
 {
        unsigned long was_online;
 
-       was_online = rcu_reader_qs_gp;
+       was_online = urcu_reader_status.qs_gp;
 
        /*
         * Mark the writer thread offline to make sure we don't wait for
@@ -230,7 +233,7 @@ void synchronize_rcu(void)
         */
        smp_mb();
        if (was_online)
-               STORE_SHARED(rcu_reader_qs_gp, 0);
+               STORE_SHARED(urcu_reader_status.qs_gp, 0);
 
        internal_urcu_lock();
        STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
@@ -240,7 +243,8 @@ void synchronize_rcu(void)
        internal_urcu_unlock();
 
        if (was_online)
-               _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+               _STORE_SHARED(urcu_reader_status.qs_gp,
+                             LOAD_SHARED(urcu_gp_ctr));
        smp_mb();
 }
 #endif  /* !(BITS_PER_LONG < 64) */
@@ -327,7 +331,7 @@ static void rcu_add_reader(pthread_t id)
        }
        registry[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
-       registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
+       registry[num_readers].urcu_reader_status = &urcu_reader_status;
        num_readers++;
 }
 
@@ -345,7 +349,7 @@ static void rcu_remove_reader(pthread_t id)
                        memcpy(index, &registry[num_readers - 1],
                                sizeof(struct reader_registry));
                        registry[num_readers - 1].tid = 0;
-                       registry[num_readers - 1].rcu_reader_qs_gp = NULL;
+                       registry[num_readers - 1].urcu_reader_status = NULL;
                        num_readers--;
                        return;
                }
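
The writer-side counterpart in urcu-qsbr.c is the gp_waiting bracket around
the per-reader wait loop. A compact sketch follows, under stated
simplifications: rcu_gp_ongoing() is reduced to an equality test against the
current counter, cpu_relax() is stubbed out, and the RCU_QS_ACTIVE_ATTEMPTS
value is assumed.

#include <sched.h>

#define RCU_QS_ACTIVE_ATTEMPTS	100	/* value assumed for the sketch */

struct urcu_reader_status {
	unsigned long qs_gp;
	unsigned long gp_waiting;
};

struct reader_registry {
	struct urcu_reader_status *urcu_reader_status;
};

unsigned long urcu_gp_ctr = 1UL;	/* RCU_GP_ONLINE stand-in */

static inline void cpu_relax(void)	/* stand-in for the arch pause hint */
{
}

/* Simplified: 0 means offline, an old snapshot means not yet quiescent. */
static int rcu_gp_ongoing_sketch(unsigned long *qs_gp)
{
	unsigned long v = *(volatile unsigned long *)qs_gp;

	return v && v != *(volatile unsigned long *)&urcu_gp_ctr;
}

static void wait_for_quiescent_state_sketch(struct reader_registry *registry,
					    int num_readers)
{
	struct reader_registry *index;

	for (index = registry; index < registry + num_readers; index++) {
		int wait_loops = 0;

		/*
		 * Flag only this reader: its next quiescent state will
		 * sched_yield() in our favor; every other reader skips
		 * the yield entirely.
		 */
		index->urcu_reader_status->gp_waiting = 1;
		while (rcu_gp_ongoing_sketch(&index->urcu_reader_status->qs_gp)) {
			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS)
				sched_yield();	/* ideally sched_yield_to() */
			else
				cpu_relax();
		}
		index->urcu_reader_status->gp_waiting = 0;
	}
}
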
diff --git a/urcu-static.h b/urcu-static.h
index 7bde5ba5340f2cd0cb4b985fa91b9f04a3ae3e2f..efb8225161b2c3c99c450d68a83289f1acb0379d 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -214,7 +214,12 @@ static inline void reader_barrier()
  */
 extern long urcu_gp_ctr;
 
-extern long __thread urcu_active_readers;
+struct urcu_reader_status {
+       long active_readers;
+       long gp_waiting;
+};
+
+extern struct urcu_reader_status __thread urcu_reader_status;
 
 static inline int rcu_old_gp_ongoing(long *value)
 {
@@ -235,22 +240,28 @@ static inline void _rcu_read_lock(void)
 {
        long tmp, gp_ctr;
 
-       tmp = urcu_active_readers;
+       tmp = urcu_reader_status.active_readers;
        /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-               gp_ctr = _LOAD_SHARED(urcu_gp_ctr);
-               if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
+               /*
+                * Volatile accesses can be reordered and optimized by the
+                * compiler when they appear within the same statement.
+                */
+               if (unlikely((gp_ctr = _LOAD_SHARED(urcu_gp_ctr))
+                               & RCU_GP_ONGOING) &&
+                   unlikely(LOAD_SHARED(urcu_reader_status.gp_waiting))) {
                        sched_yield();
                        gp_ctr = _LOAD_SHARED(urcu_gp_ctr);
                }
-               _STORE_SHARED(urcu_active_readers, gp_ctr);
+               _STORE_SHARED(urcu_reader_status.active_readers, gp_ctr);
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer. See force_mb_all_threads().
                 */
                reader_barrier();
        } else {
-               _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
+               _STORE_SHARED(urcu_reader_status.active_readers,
+                             tmp + RCU_GP_COUNT);
        }
 }
 
@@ -264,7 +275,8 @@ static inline void _rcu_read_unlock(void)
         * (no nesting).
         */
        reader_barrier();
-       _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
+       _STORE_SHARED(urcu_reader_status.active_readers,
+                     urcu_reader_status.active_readers - RCU_GP_COUNT);
 }
 
 /**
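
In the urcu-static.h flavour, only the outermost _rcu_read_lock() snapshots
the global counter and may yield; nested calls just bump the nesting count.
The following stand-alone sketch illustrates that scheme; the bit layout
chosen for RCU_GP_COUNT, RCU_GP_CTR_BIT and RCU_GP_ONGOING is assumed for the
sketch, and the reader barriers are marked but elided.

#include <sched.h>

/* Bit layout assumed for the sketch. */
#define RCU_GP_COUNT		(1L << 0)
#define RCU_GP_CTR_BIT		(1L << 8)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
#define RCU_GP_ONGOING		(1L << 16)

#define LOAD_SHARED(x)		(*(volatile __typeof__(x) *)&(x))
#define STORE_SHARED(x, v)	((*(volatile __typeof__(x) *)&(x)) = (v))

struct urcu_reader_status {
	long active_readers;	/* gp phase + nesting count */
	long gp_waiting;	/* set by a writer waiting on this reader */
};

long urcu_gp_ctr = RCU_GP_COUNT;
struct urcu_reader_status __thread urcu_reader_status;

static inline void read_lock_sketch(void)
{
	long tmp = urcu_reader_status.active_readers;

	if (!(tmp & RCU_GP_CTR_NEST_MASK)) {
		/* Outermost nesting level: snapshot the global counter. */
		long gp_ctr = LOAD_SHARED(urcu_gp_ctr);

		/* Yield only if a writer is waiting on this very thread. */
		if ((gp_ctr & RCU_GP_ONGOING) &&
		    LOAD_SHARED(urcu_reader_status.gp_waiting)) {
			sched_yield();
			gp_ctr = LOAD_SHARED(urcu_gp_ctr);
		}
		STORE_SHARED(urcu_reader_status.active_readers, gp_ctr);
		/* reader_barrier() belongs here in the real code. */
	} else {
		/* Nested: just increment the nesting count. */
		STORE_SHARED(urcu_reader_status.active_readers,
			     tmp + RCU_GP_COUNT);
	}
}

static inline void read_unlock_sketch(void)
{
	/* reader_barrier() belongs here in the real code. */
	STORE_SHARED(urcu_reader_status.active_readers,
		     urcu_reader_status.active_readers - RCU_GP_COUNT);
}
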
diff --git a/urcu.c b/urcu.c
index d9604973e37f850a1036599dd0763519b42ed6b5..a9ebf597858b063d20223b6ac48331a7a3a730db 100644
--- a/urcu.c
+++ b/urcu.c
@@ -61,14 +61,14 @@ long urcu_gp_ctr = RCU_GP_COUNT;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-long __thread urcu_active_readers;
+struct urcu_reader_status __thread urcu_reader_status;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
        pthread_t tid;
-       long *urcu_active_readers;
+       struct urcu_reader_status *urcu_reader_status;
        char *need_mb;
 };
 
@@ -215,12 +215,15 @@ void wait_for_quiescent_state(void)
        if (!registry)
                return;
        /*
-        * Wait for each thread urcu_active_readers count to become 0.
+        * Wait for each thread's active_readers count to become 0.
         */
        for (index = registry; index < registry + num_readers; index++) {
                int wait_loops = 0;
+
+               index->urcu_reader_status->gp_waiting = 1;
 #ifndef HAS_INCOHERENT_CACHES
-               while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+               while (rcu_old_gp_ongoing(
+                               &index->urcu_reader_status->active_readers)) {
                        if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
                                sched_yield();  /* ideally sched_yield_to() */
                        } else {
@@ -230,9 +233,10 @@ void wait_for_quiescent_state(void)
 #else /* #ifndef HAS_INCOHERENT_CACHES */
                /*
                 * BUSY-LOOP. Force the reader thread to commit its
-                * urcu_active_readers update to memory if we wait for too long.
+                * active_readers update to memory if we wait for too long.
                 */
-               while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+               while (rcu_old_gp_ongoing(
+                               &index->urcu_reader_status->active_readers)) {
                        switch (wait_loops++) {
                        case RCU_QS_ACTIVE_ATTEMPTS:
                                sched_yield();  /* ideally sched_yield_to() */
@@ -246,6 +250,7 @@ void wait_for_quiescent_state(void)
                        }
                }
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+               index->urcu_reader_status->gp_waiting = 0;
        }
 }
 
@@ -397,7 +402,7 @@ static void rcu_add_reader(pthread_t id)
        }
        registry[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
-       registry[num_readers].urcu_active_readers = &urcu_active_readers;
+       registry[num_readers].urcu_reader_status = &urcu_reader_status;
        registry[num_readers].need_mb = &need_mb;
        num_readers++;
 }
@@ -416,7 +421,7 @@ static void rcu_remove_reader(pthread_t id)
                        memcpy(index, &registry[num_readers - 1],
                                sizeof(struct reader_registry));
                        registry[num_readers - 1].tid = 0;
-                       registry[num_readers - 1].urcu_active_readers = NULL;
+                       registry[num_readers - 1].urcu_reader_status = NULL;
                        num_readers--;
                        return;
                }
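
From an application's point of view the change is invisible: the QSBR API is
unchanged, only the yield policy differs. A hedged usage sketch, assuming the
public liburcu entry points of the period (rcu_register_thread(),
rcu_quiescent_state(), rcu_unregister_thread()), the urcu-qsbr.h header name,
and a hypothetical stop flag:

#include <pthread.h>
#include <urcu-qsbr.h>	/* header name assumed; QSBR flavour of liburcu */

static volatile int stop;	/* hypothetical termination flag */

static void *reader_thread(void *arg)
{
	(void)arg;
	rcu_register_thread();
	while (!stop) {
		rcu_read_lock();
		/* ... read RCU-protected data ... */
		rcu_read_unlock();
		/*
		 * QSBR readers must announce quiescent states themselves.
		 * After this commit, the call below only sched_yield()s
		 * when a writer has flagged this thread via gp_waiting;
		 * otherwise it is a couple of loads and a store.
		 */
		rcu_quiescent_state();
	}
	rcu_unregister_thread();
	return NULL;
}

The win is on the reader fast path: a thread no writer is waiting for pays
only a few loads and a store per quiescent state, instead of an unconditional
sched_yield() whenever a grace period happens to be in flight.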