Add _STORE_SHARED() and _LOAD_SHARED()
author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
		Fri, 13 Feb 2009 15:37:58 +0000 (10:37 -0500)
committer	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
		Fri, 13 Feb 2009 15:37:58 +0000 (10:37 -0500)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu.c
urcu.h
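
This commit splits each shared-access macro into two layers: a bare access identifier (_LOAD_SHARED()/_STORE_SHARED()) and a wrapper (LOAD_SHARED()/STORE_SHARED()) that adds the smp_rmc()/smp_wmc() cache-management primitive around it. Call sites that already obtain a memory commit some other way, for example through the writer's signal-handler barrier, can use the underscore variants directly. A minimal usage sketch of the wrapped forms, assuming a hypothetical shared_flag variable that is not part of this patch:

long shared_flag;	/* hypothetical variable, for illustration only */

static void writer_publish(void)
{
	STORE_SHARED(shared_flag, 1);		/* _STORE_SHARED() then smp_wmc() */
}

static long reader_poll(void)
{
	return LOAD_SHARED(shared_flag);	/* smp_rmc() then _LOAD_SHARED() */
}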

diff --git a/urcu.c b/urcu.c
index 8cb32a117bb065405ca3f45e0224dfe8fbebbf5d..5e2d2afff5641c57a80548c1f78469c52ccf7b09 100644
--- a/urcu.c
+++ b/urcu.c
@@ -23,9 +23,14 @@ pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
  * Global grace period counter.
  * Contains the current RCU_GP_CTR_BIT.
  * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken. Read by both writer and readers.
  */
 long urcu_gp_ctr = RCU_GP_COUNT;
 
+/*
+ * Written to only by each individual reader. Read by both the reader and the
+ * writers.
+ */
 long __thread urcu_active_readers;
 
 /* Thread IDs of registered readers */
@@ -73,7 +78,7 @@ void internal_urcu_unlock(void)
  */
 static void switch_next_urcu_qparity(void)
 {
-       urcu_gp_ctr ^= RCU_GP_CTR_BIT;
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
 }
 
 #ifdef DEBUG_FULL_MB
@@ -183,8 +188,8 @@ void synchronize_rcu(void)
         * 0 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
         */
-       smp_mc();
 
        /*
         * Wait for previous parity to be empty of readers.
@@ -196,8 +201,8 @@ void synchronize_rcu(void)
         * committing qparity update to memory. Failure to do so could result in
         * the writer waiting forever while new readers are always accessing
         * data (no progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
         */
-       smp_mc();
 
        switch_next_urcu_qparity();     /* 1 -> 0 */
 
@@ -206,8 +211,8 @@ void synchronize_rcu(void)
         * 1 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
         */
-       smp_mc();
 
        /*
         * Wait for previous parity to be empty of readers.
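
With the parity switch now going through STORE_SHARED() and the reader counters read through LOAD_SHARED(), each access carries its own cache commit or flush, which is why the standalone smp_mc() calls above can be dropped. A simplified sketch of the resulting writer-side sequence, using the helper names visible in urcu.c; the body of wait_for_quiescent_state() is paraphrased, not quoted:

void synchronize_rcu(void)	/* sketch only, details elided */
{
	internal_urcu_lock();
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1: STORE_SHARED(urcu_gp_ctr, ...) */
	wait_for_quiescent_state();	/* loops on LOAD_SHARED(urcu_active_readers) */

	switch_next_urcu_qparity();	/* 1 -> 0 */
	wait_for_quiescent_state();

	force_mb_all_threads();
	internal_urcu_unlock();
}
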
diff --git a/urcu.h b/urcu.h
index df270326675dafdf78a237ac72bceebc6b673274..539b1d5588adc64c40bf5affdcef1fca6825f4b1 100644
--- a/urcu.h
+++ b/urcu.h
@@ -179,14 +179,28 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
+/*
+ * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
+ */
+#define _LOAD_SHARED(p)               ACCESS_ONCE(p)
+
 /*
  * Load a data from shared memory, doing a cache flush if required.
  */
-#define LOAD_SHARED(p)        ({ \
-                               smp_rmc(); \
-                               typeof(p) _________p1 = ACCESS_ONCE(p); \
-                               (_________p1); \
-                               })
+#define LOAD_SHARED(p) \
+       ({ \
+               smp_rmc(); \
+               _LOAD_SHARED(p); \
+       })
+
+
+/*
+ * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
+ */
+#define _STORE_SHARED(x, v) \
+       do { \
+               (x) = (v); \
+       } while (0)
 
 /*
  * Store v into x, where x is located in shared memory. Performs the required
@@ -194,8 +208,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  */
 #define STORE_SHARED(x, v) \
        do { \
-               (x) = (v); \
-               smp_wmc; \
+               _STORE_SHARED(x, v); \
+               smp_wmc(); \
        } while (0)
 
 /**
@@ -214,8 +228,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                (_________p1); \
                                })
 
-
-
 #define SIGURCU SIGUSR1
 
 /*
@@ -331,15 +343,15 @@ static inline void rcu_read_lock(void)
        /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
        /*
         * The data dependency "read urcu_gp_ctr, write urcu_active_readers",
-        * serializes those two memory operations. We are not using STORE_SHARED
-        * and LOAD_SHARED here (although we should) because the writer will
-        * wake us up with a signal which does a flush in its handler to perform
-        * urcu_gp_ctr re-read and urcu_active_readers commit to main memory.
+        * serializes those two memory operations. The memory barrier in the
+        * signal handler ensures we receive the proper memory commit barriers
+        * required by _STORE_SHARED and _LOAD_SHARED whenever communication
+        * with the writer is needed.
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
-               urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
+               _STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
        else
-               urcu_active_readers = tmp + RCU_GP_COUNT;
+               _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
@@ -354,7 +366,7 @@ static inline void rcu_read_unlock(void)
         * Finish using rcu before decrementing the pointer.
         * See force_mb_all_threads().
         */
-       urcu_active_readers -= RCU_GP_COUNT;
+       _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
 }
 
 /**
@@ -375,8 +387,7 @@ static inline void rcu_read_unlock(void)
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
-               (p) = (v); \
-               smp_wmc(); \
+               STORE_SHARED(p, v); \
        })
 
 #define rcu_xchg_pointer(p, v) \
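
In the read-side fast path the underscore variants are used on purpose: the reader never issues smp_rmc()/smp_wmc() itself. Instead, the writer's force_mb_all_threads() sends SIGURCU to every registered reader, and the memory barrier executed in the signal handler supplies the commit and flush that _STORE_SHARED() and _LOAD_SHARED() document as required. A minimal sketch of that handler-side pattern, assuming urcu.h is included; the sig_done acknowledgment flag is hypothetical and not taken from urcu.c:

static volatile int sig_done;		/* hypothetical ack flag */

static void sigurcu_handler(int signo)
{
	smp_mb();			/* commit urcu_active_readers, refresh urcu_gp_ctr */
	STORE_SHARED(sig_done, 1);	/* tell the writer this reader is now ordered */
}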