QSBR: Implement 2-phase grace period for 32-bit arch
authorMathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Fri, 18 Sep 2009 13:05:03 +0000 (09:05 -0400)
committerMathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Fri, 18 Sep 2009 13:05:03 +0000 (09:05 -0400)
Ensures we never run into counter overflow on 32-bit architectures.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu-qsbr-static.h
urcu-qsbr.c

index 57f0c50a8b899b2db0fc77084b052beb534d9d5e..22a93b2fd7a823d21c76cbbd1ad5c6bf3b0495e4 100644 (file)
@@ -173,6 +173,17 @@ extern unsigned long urcu_gp_ctr;
 
 extern unsigned long __thread rcu_reader_qs_gp;
 
+#if (BITS_PER_LONG < 64)
+static inline int rcu_gp_ongoing(unsigned long *value)
+{
+       unsigned long reader_gp;
+
+       if (value == NULL)
+               return 0;
+       reader_gp = LOAD_SHARED(*value);
+       return reader_gp && ((reader_gp ^ urcu_gp_ctr) & RCU_GP_CTR);
+}
+#else /* !(BITS_PER_LONG < 64) */
 static inline int rcu_gp_ongoing(unsigned long *value)
 {
        unsigned long reader_gp;
@@ -182,6 +193,7 @@ static inline int rcu_gp_ongoing(unsigned long *value)
        reader_gp = LOAD_SHARED(*value);
        return reader_gp && (reader_gp - urcu_gp_ctr > ULONG_MAX / 2);
 }
+#endif  /* !(BITS_PER_LONG < 64) */
 
 static inline void _rcu_read_lock(void)
 {
index ba6e02af27f3a18b60f26d56779bb5d4f869524e..a86f6e94d5ae13eb24e5d34610a3d9c5954373b4 100644 (file)
@@ -99,13 +99,6 @@ static void internal_urcu_unlock(void)
        }
 }
 
-#ifdef HAS_INCOHERENT_CACHES
-static void force_mb_single_thread(struct reader_registry *index)
-{
-       smp_mb();
-}
-#endif /* #ifdef HAS_INCOHERENT_CACHES */
-
 static void wait_for_quiescent_state(void)
 {
        struct reader_registry *index;
@@ -120,23 +113,82 @@ static void wait_for_quiescent_state(void)
                while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
                        cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
-               int wait_loops = 0;
-               /*
-                * BUSY-LOOP. Force the reader thread to commit its
-                * rcu_reader_qs_gp update to memory if we wait for too long.
-                */
-               while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
-                       if (wait_loops++ == KICK_READER_LOOPS) {
-                               force_mb_single_thread(index);
-                               wait_loops = 0;
-                       } else {
-                               cpu_relax();
-                       }
-               }
+               while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
+                       smp_mb();
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
        }
 }
 
+/*
+ * Use a two-phase (two-subphase) algorithm on architectures whose long type
+ * is smaller than 64 bits, so the global counter can never overflow.
+ */
+
+#if (BITS_PER_LONG < 64)
+/*
+ * called with urcu_mutex held.
+ */
+static void switch_next_urcu_qparity(void)
+{
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR);
+}
+
+void synchronize_rcu(void)
+{
+       /* All threads should read qparity before accessing the data
+        * structure pointed to by the new ptr.
+        */
+       /* Write new ptr before changing the qparity */
+       smp_mb();
+
+       internal_urcu_lock();
+
+       switch_next_urcu_qparity();     /* 0 -> 1 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 0 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 0 */
+
+       /*
+        * Must finish waiting for quiescent state for parity 0 before
+        * committing qparity update to memory. Failure to do so could result in
+        * the writer waiting forever while new readers are always accessing
+        * data (no progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       switch_next_urcu_qparity();     /* 1 -> 0 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 1 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 1 */
+
+       internal_urcu_unlock();
+
+       /* Finish waiting for reader threads before letting the old ptr be
+        * freed.
+        */
+       smp_mb();
+}
+#else /* !(BITS_PER_LONG < 64) */
 void synchronize_rcu(void)
 {
        unsigned long was_online;
@@ -161,6 +213,7 @@ void synchronize_rcu(void)
                _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
        smp_mb();
 }
+#endif  /* !(BITS_PER_LONG < 64) */
 
 /*
  * library wrappers to be used by non-LGPL compatible source code.
This page took 0.035914 seconds and 4 git commands to generate.