URCU: rename light/heavy barriers to slave/master
author Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Sat, 30 Jan 2010 18:28:35 +0000 (13:28 -0500)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Sat, 30 Jan 2010 18:28:35 +0000 (13:28 -0500)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu-static.h
urcu.c

diff --git a/urcu-static.h b/urcu-static.h
index 5a75dde18630a84908f82b3096cb166a875b8ae6..f2c452a64edd4c2aa781e5dd867276f8e2f46525 100644 (file)
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -160,10 +160,25 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+/*
+ * RCU memory barrier broadcast group. Currently, only broadcast to all process
+ * threads is supported (group 0).
+ *
+ * Slave barriers are only guaranteed to be ordered wrt master barriers.
+ *
+ * The pair ordering is detailed as (O: ordered, X: not ordered) :
+ *               slave  master
+ *        slave    X      O
+ *        master   O      O
+ */
+
+#define MB_GROUP_ALL           0
+#define RCU_MB_GROUP           MB_GROUP_ALL
+
 #ifdef RCU_MEMBARRIER
 extern int has_sys_membarrier;
 
-static inline void smp_mb_light()
+static inline void smp_mb_slave(int group)
 {
        if (likely(has_sys_membarrier))
                barrier();
@@ -173,14 +188,14 @@ static inline void smp_mb_light()
 #endif
 
 #ifdef RCU_MB
-static inline void smp_mb_light()
+static inline void smp_mb_slave(int group)
 {
        smp_mb();
 }
 #endif
 
 #ifdef RCU_SIGNAL
-static inline void smp_mb_light()
+static inline void smp_mb_slave(int group)
 {
        barrier();
 }
@@ -255,9 +270,9 @@ static inline void _rcu_read_lock(void)
                _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
-                * accessing the pointer. See smp_mb_heavy().
+                * accessing the pointer. See smp_mb_master().
                 */
-               smp_mb_light();
+               smp_mb_slave(RCU_MB_GROUP);
        } else {
                _STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
        }
@@ -270,13 +285,13 @@ static inline void _rcu_read_unlock(void)
        tmp = rcu_reader.ctr;
        /*
         * Finish using rcu before decrementing the pointer.
-        * See smp_mb_heavy().
+        * See smp_mb_master().
         */
        if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
-               smp_mb_light();
+               smp_mb_slave(RCU_MB_GROUP);
                _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
                /* write rcu_reader.ctr before read futex */
-               smp_mb_light();
+               smp_mb_slave(RCU_MB_GROUP);
                wake_up_gp();
        } else {
                _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
diff --git a/urcu.c b/urcu.c
index ffdab3c2fc1eacbad2c4e73f5e89e83261fdb8d5..5e8c6127a8c2f6413d56e95f66eba8c827595c85 100644 (file)
--- a/urcu.c
+++ b/urcu.c
@@ -121,7 +121,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 }
 
 #ifdef RCU_MEMBARRIER
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
 {
        if (likely(has_sys_membarrier))
                membarrier(MEMBARRIER_EXPEDITED);
@@ -131,7 +131,7 @@ static void smp_mb_heavy(void)
 #endif
 
 #ifdef RCU_MB
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
 {
        smp_mb();
 }
@@ -181,7 +181,7 @@ static void force_mb_all_readers(void)
        smp_mb();       /* read ->need_mb before ending the barrier */
 }
 
-static void smp_mb_heavy(void)
+static void smp_mb_master(int group)
 {
        force_mb_all_readers();
 }
@@ -193,7 +193,7 @@ static void smp_mb_heavy(void)
 static void wait_gp(void)
 {
        /* Read reader_gp before read futex */
-       smp_mb_heavy();
+       smp_mb_master(RCU_MB_GROUP);
        if (uatomic_read(&gp_futex) == -1)
                futex_async(&gp_futex, FUTEX_WAIT, -1,
                      NULL, NULL, 0);
@@ -230,7 +230,7 @@ void update_counter_and_wait(void)
                if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                        uatomic_dec(&gp_futex);
                        /* Write futex before read reader_gp */
-                       smp_mb_heavy();
+                       smp_mb_master(RCU_MB_GROUP);
                }
 
                list_for_each_entry_safe(index, tmp, &registry, head) {
@@ -242,7 +242,7 @@ void update_counter_and_wait(void)
                if (list_empty(&registry)) {
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               smp_mb_heavy();
+                               smp_mb_master(RCU_MB_GROUP);
                                uatomic_set(&gp_futex, 0);
                        }
                        break;
@@ -260,7 +260,7 @@ void update_counter_and_wait(void)
                if (list_empty(&registry)) {
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
                                /* Read reader_gp before write futex */
-                               smp_mb_heavy();
+                               smp_mb_master(RCU_MB_GROUP);
                                uatomic_set(&gp_futex, 0);
                        }
                        break;
@@ -270,7 +270,7 @@ void update_counter_and_wait(void)
                                wait_gp();
                                break; /* only escape switch */
                        case KICK_READER_LOOPS:
-                               smp_mb_heavy();
+                               smp_mb_master(RCU_MB_GROUP);
                                wait_loops = 0;
                                break; /* only escape switch */
                        default:
@@ -294,7 +294,7 @@ void synchronize_rcu(void)
         * where new ptr points to. Must be done within rcu_gp_lock because it
         * iterates on reader threads.*/
        /* Write new ptr before changing the qparity */
-       smp_mb_heavy();
+       smp_mb_master(RCU_MB_GROUP);
 
        /*
         * Wait for previous parity to be empty of readers.
@@ -324,7 +324,7 @@ void synchronize_rcu(void)
        /* Finish waiting for reader threads before letting the old ptr being
         * freed. Must be done within rcu_gp_lock because it iterates on reader
         * threads. */
-       smp_mb_heavy();
+       smp_mb_master(RCU_MB_GROUP);
 out:
        mutex_unlock(&rcu_gp_lock);
 }
This page took 0.028643 seconds and 4 git commands to generate.