LOAD_SHARED and STORE_SHARED should have CMM_ prefix
author     Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
           Fri, 19 Nov 2010 02:44:59 +0000 (21:44 -0500)
committer  Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
           Fri, 19 Nov 2010 02:44:59 +0000 (21:44 -0500)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
14 files changed:
urcu-bp-static.h
urcu-bp.c
urcu-defer.c
urcu-pointer-static.h
urcu-qsbr-static.h
urcu-qsbr.c
urcu-static.h
urcu.c
urcu/compiler.h
urcu/system.h
urcu/uatomic_arch_x86.h
urcu/uatomic_generic.h
urcu/wfqueue-static.h
urcu/wfstack-static.h

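The change below is a mechanical rename of the shared-access helpers from the CAA_ prefix to the CMM_ prefix already used by the other memory-model primitives (cmm_smp_mb(), cmm_smp_wmb(), cmm_barrier(), ...); the macro bodies themselves are untouched. As a minimal, hypothetical usage sketch of the new names — assuming <urcu/arch.h> and <urcu/system.h> provide cmm_smp_mb() and the CMM_*_SHARED() macros exactly as the hunks below define them; the flag/data variables and the explicit barrier pairing are illustrative only, not taken from this commit:

    #include <urcu/arch.h>     /* cmm_smp_mb() */
    #include <urcu/system.h>   /* CMM_LOAD_SHARED(), CMM_STORE_SHARED() */

    static int shared_data;
    static int data_ready;

    static void writer(void)
    {
            shared_data = 42;
            cmm_smp_mb();                     /* order the data store before the flag */
            CMM_STORE_SHARED(data_ready, 1);  /* single store, plus cmm_smp_wmc() */
    }

    static int reader(void)
    {
            if (!CMM_LOAD_SHARED(data_ready)) /* single load, no merge/refetch */
                    return -1;
            cmm_smp_mb();                     /* order the flag load before the data read */
            return shared_data;
    }
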
diff --git a/urcu-bp-static.h b/urcu-bp-static.h
index 2049ee6dd06311d133a1f6e805fe2ad39d616abe..14c6cfecac3af01a3bc4680c36381736f37a30f7 100644
--- a/urcu-bp-static.h
+++ b/urcu-bp-static.h
@@ -162,7 +162,7 @@ static inline int rcu_old_gp_ongoing(long *value)
         * Make sure both tests below are done on the same version of *value
         * to insure consistency.
         */
-       v = CAA_LOAD_SHARED(*value);
+       v = CMM_LOAD_SHARED(*value);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
@@ -182,14 +182,14 @@ static inline void _rcu_read_lock(void)
         *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-               _CAA_STORE_SHARED(rcu_reader->ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+               _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer.
                 */
                cmm_smp_mb();
        } else {
-               _CAA_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+               _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
        }
 }
 
@@ -199,7 +199,7 @@ static inline void _rcu_read_unlock(void)
         * Finish using rcu before decrementing the pointer.
         */
        cmm_smp_mb();
-       _CAA_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+       _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
 
diff --git a/urcu-bp.c b/urcu-bp.c
index 136f97d3cf6746eef6d39a9f2a22ca1139a342fc..62d270412823062ac175aaeb4f412b1518c34c8f 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -123,13 +123,13 @@ void update_counter_and_wait(void)
        struct rcu_reader *index, *tmp;
 
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+       CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
 
        /*
         * Must commit qparity update to memory before waiting for other parity
         * quiescent state. Failure to do so could result in the writer waiting
         * forever while new readers are always accessing data (no progress).
-        * Ensured by CAA_STORE_SHARED and CAA_LOAD_SHARED.
+        * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
         */
 
        /*
diff --git a/urcu-defer.c b/urcu-defer.c
index 796e7e10088ce3ab5063b6a14ef0b72d24d57fa3..28bb18ccc2f33f607f1d9b4dfc9c4c4da4d0eeef 100644
--- a/urcu-defer.c
+++ b/urcu-defer.c
@@ -110,7 +110,7 @@ static unsigned long rcu_defer_num_callbacks(void)
 
        mutex_lock(&rcu_defer_mutex);
        cds_list_for_each_entry(index, &registry, list) {
-               head = CAA_LOAD_SHARED(index->head);
+               head = CMM_LOAD_SHARED(index->head);
                num_items += head - index->tail;
        }
        mutex_unlock(&rcu_defer_mutex);
@@ -153,21 +153,21 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
 
        for (i = queue->tail; i != head;) {
                cmm_smp_rmb();       /* read head before q[]. */
-               p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+               p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                if (unlikely(DQ_IS_FCT_BIT(p))) {
                        DQ_CLEAR_FCT_BIT(p);
                        queue->last_fct_out = p;
-                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                } else if (unlikely(p == DQ_FCT_MARK)) {
-                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                        queue->last_fct_out = p;
-                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                }
                fct = queue->last_fct_out;
                fct(p);
        }
        cmm_smp_mb();   /* push tail after having used q[] */
-       CAA_STORE_SHARED(queue->tail, i);
+       CMM_STORE_SHARED(queue->tail, i);
 }
 
 static void _rcu_defer_barrier_thread(void)
@@ -212,7 +212,7 @@ void rcu_defer_barrier(void)
 
        mutex_lock(&rcu_defer_mutex);
        cds_list_for_each_entry(index, &registry, list) {
-               index->last_head = CAA_LOAD_SHARED(index->head);
+               index->last_head = CMM_LOAD_SHARED(index->head);
                num_items += index->last_head - index->tail;
        }
        if (likely(!num_items)) {
@@ -241,7 +241,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
         * thread.
         */
        head = defer_queue.head;
-       tail = CAA_LOAD_SHARED(defer_queue.tail);
+       tail = CMM_LOAD_SHARED(defer_queue.tail);
 
        /*
         * If queue is full, or reached threshold. Empty queue ourself.
@@ -250,7 +250,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
        if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
                assert(head - tail <= DEFER_QUEUE_SIZE);
                rcu_defer_barrier_thread();
-               assert(head - CAA_LOAD_SHARED(defer_queue.tail) == 0);
+               assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
        }
 
        if (unlikely(defer_queue.last_fct_in != fct)) {
@@ -261,13 +261,13 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                         * marker, write DQ_FCT_MARK followed by the function
                         * pointer.
                         */
-                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                } else {
                        DQ_SET_FCT_BIT(fct);
-                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        } else {
@@ -276,16 +276,16 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                         * If the data to encode is not aligned or the marker,
                         * write DQ_FCT_MARK followed by the function pointer.
                         */
-                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        }
-       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+       _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
        cmm_smp_wmb();  /* Publish new pointer before head */
                        /* Write q[] before head. */
-       CAA_STORE_SHARED(defer_queue.head, head);
+       CMM_STORE_SHARED(defer_queue.head, head);
        cmm_smp_mb();   /* Write queue head before read futex */
        /*
         * Wake-up any waiting defer thread.
diff --git a/urcu-pointer-static.h b/urcu-pointer-static.h
index 5a1e0e4d12957fe984e6050aeabb06427425e264..b6444860f51143dca1bb3d5eb34e4bbd4a6ce1cd 100644
--- a/urcu-pointer-static.h
+++ b/urcu-pointer-static.h
@@ -49,7 +49,7 @@ extern "C" {
  * Inserts memory barriers on architectures that require them (currently only
  * Alpha) and documents which pointers are protected by RCU.
  *
- * The compiler memory barrier in CAA_LOAD_SHARED() ensures that value-speculative
+ * The compiler memory barrier in CMM_LOAD_SHARED() ensures that value-speculative
  * optimizations (e.g. VSS: Value Speculation Scheduling) does not perform the
  * data read before the pointer read by speculating the value of the pointer.
  * Correct ordering is ensured because the pointer is read as a volatile access.
@@ -62,7 +62,7 @@ extern "C" {
  */
 
 #define _rcu_dereference(p)     ({                                     \
-                               typeof(p) _________p1 = CAA_LOAD_SHARED(p); \
+                               typeof(p) _________p1 = CMM_LOAD_SHARED(p); \
                                cmm_smp_read_barrier_depends();         \
                                (_________p1);                          \
                                })
diff --git a/urcu-qsbr-static.h b/urcu-qsbr-static.h
index da4a782b61d17f123e7060aaa08a17d9bd59900b..e0b12be15c2ebdf540446dbbaa16cdce8ff5f4fe 100644
--- a/urcu-qsbr-static.h
+++ b/urcu-qsbr-static.h
@@ -159,7 +159,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
 {
        unsigned long v;
 
-       v = CAA_LOAD_SHARED(*ctr);
+       v = CMM_LOAD_SHARED(*ctr);
        return v && (v != rcu_gp_ctr);
 }
 
@@ -175,7 +175,7 @@ static inline void _rcu_read_unlock(void)
 static inline void _rcu_quiescent_state(void)
 {
        cmm_smp_mb();
-       _CAA_STORE_SHARED(rcu_reader.ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+       _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();   /* write rcu_reader.ctr before read futex */
        wake_up_gp();
        cmm_smp_mb();
@@ -184,7 +184,7 @@ static inline void _rcu_quiescent_state(void)
 static inline void _rcu_thread_offline(void)
 {
        cmm_smp_mb();
-       CAA_STORE_SHARED(rcu_reader.ctr, 0);
+       CMM_STORE_SHARED(rcu_reader.ctr, 0);
        cmm_smp_mb();   /* write rcu_reader.ctr before read futex */
        wake_up_gp();
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
@@ -193,7 +193,7 @@ static inline void _rcu_thread_offline(void)
 static inline void _rcu_thread_online(void)
 {
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
-       _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+       _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();
 }
 
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 607e6aee1f44adbb48d8f569e2be26b414d6c93c..69effd5ad8b52e3be4ee263ede2cafe81816f38b 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -114,10 +114,10 @@ static void update_counter_and_wait(void)
 
 #if (CAA_BITS_PER_LONG < 64)
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+       CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
 #else  /* !(CAA_BITS_PER_LONG < 64) */
        /* Increment current G.P. */
-       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+       CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
 #endif /* !(CAA_BITS_PER_LONG < 64) */
 
        /*
@@ -198,7 +198,7 @@ void synchronize_rcu(void)
         * threads registered as readers.
         */
        if (was_online)
-               CAA_STORE_SHARED(rcu_reader.ctr, 0);
+               CMM_STORE_SHARED(rcu_reader.ctr, 0);
 
        mutex_lock(&rcu_gp_lock);
 
@@ -238,7 +238,7 @@ out:
         * freed.
         */
        if (was_online)
-               _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+               _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();
 }
 #else /* !(CAA_BITS_PER_LONG < 64) */
@@ -255,7 +255,7 @@ void synchronize_rcu(void)
         */
        cmm_smp_mb();
        if (was_online)
-               CAA_STORE_SHARED(rcu_reader.ctr, 0);
+               CMM_STORE_SHARED(rcu_reader.ctr, 0);
 
        mutex_lock(&rcu_gp_lock);
        if (cds_list_empty(&registry))
@@ -265,7 +265,7 @@ out:
        mutex_unlock(&rcu_gp_lock);
 
        if (was_online)
-               _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
+               _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();
 }
 #endif  /* !(CAA_BITS_PER_LONG < 64) */
diff --git a/urcu-static.h b/urcu-static.h
index 58dfabbdab7c85a944d77b4569e87322a70c556e..18e4826a5a31789c7cb408832421cbddb7d83c22 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -250,7 +250,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
         * Make sure both tests below are done on the same version of *value
         * to insure consistency.
         */
-       v = CAA_LOAD_SHARED(*ctr);
+       v = CMM_LOAD_SHARED(*ctr);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
@@ -266,14 +266,14 @@ static inline void _rcu_read_lock(void)
         *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-               _CAA_STORE_SHARED(rcu_reader.ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
+               _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer. See smp_mb_master().
                 */
                smp_mb_slave(RCU_MB_GROUP);
        } else {
-               _CAA_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+               _CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
        }
 }
 
@@ -288,12 +288,12 @@ static inline void _rcu_read_unlock(void)
         */
        if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
                smp_mb_slave(RCU_MB_GROUP);
-               _CAA_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+               _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
                /* write rcu_reader.ctr before read futex */
                smp_mb_slave(RCU_MB_GROUP);
                wake_up_gp();
        } else {
-               _CAA_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+               _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
        }
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
diff --git a/urcu.c b/urcu.c
index 5a9c2f0ae761d9a4d7cd97ed7af4bfc42998edb7..e529ac0d5168447cb690162217b7118990996cc1 100644
--- a/urcu.c
+++ b/urcu.c
@@ -99,9 +99,9 @@ static void mutex_lock(pthread_mutex_t *mutex)
                        perror("Error in pthread mutex lock");
                        exit(-1);
                }
-               if (CAA_LOAD_SHARED(rcu_reader.need_mb)) {
+               if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
                        cmm_smp_mb();
-                       _CAA_STORE_SHARED(rcu_reader.need_mb, 0);
+                       _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
                        cmm_smp_mb();
                }
                poll(NULL,0,10);
@@ -155,7 +155,7 @@ static void force_mb_all_readers(void)
         * cache flush is enforced.
         */
        cds_list_for_each_entry(index, &registry, node) {
-               CAA_STORE_SHARED(index->need_mb, 1);
+               CMM_STORE_SHARED(index->need_mb, 1);
                pthread_kill(index->tid, SIGRCU);
        }
        /*
@@ -172,7 +172,7 @@ static void force_mb_all_readers(void)
         * the Linux Test Project (LTP).
         */
        cds_list_for_each_entry(index, &registry, node) {
-               while (CAA_LOAD_SHARED(index->need_mb)) {
+               while (CMM_LOAD_SHARED(index->need_mb)) {
                        pthread_kill(index->tid, SIGRCU);
                        poll(NULL, 0, 1);
                }
@@ -205,7 +205,7 @@ void update_counter_and_wait(void)
        struct rcu_reader *index, *tmp;
 
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+       CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
 
        /*
         * Must commit rcu_gp_ctr update to memory before waiting for quiescent
@@ -384,7 +384,7 @@ static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
         * executed on.
         */
        cmm_smp_mb();
-       _CAA_STORE_SHARED(rcu_reader.need_mb, 0);
+       _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
        cmm_smp_mb();
 }
 
diff --git a/urcu/compiler.h b/urcu/compiler.h
index ca32debce01d47a49614bd281ab495355949f4aa..64d12d344a26a1fdca56fd0b1201437241b9967a 100644
--- a/urcu/compiler.h
+++ b/urcu/compiler.h
 /*
  * Instruct the compiler to perform only a single access to a variable
  * (prohibits merging and refetching). The compiler is also forbidden to reorder
- * successive instances of CAA_ACCESS_ONCE(), but only when the compiler is aware of
+ * successive instances of CMM_ACCESS_ONCE(), but only when the compiler is aware of
  * particular ordering. Compiler ordering can be ensured, for example, by
- * putting two CAA_ACCESS_ONCE() in separate C statements.
+ * putting two CMM_ACCESS_ONCE() in separate C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
  * merging, or refetching absolutely anything at any time.  Its main intended
  * use is to mediate communication between process-level code and irq/NMI
  * handlers, all running on the same CPU.
  */
-#define CAA_ACCESS_ONCE(x)     (*(volatile typeof(x) *)&(x))
+#define CMM_ACCESS_ONCE(x)     (*(volatile typeof(x) *)&(x))
 
 #ifndef max
 #define max(a,b) ((a)>(b)?(a):(b))
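
As the comment in this hunk notes, CMM_ACCESS_ONCE() only constrains the compiler; it implies no hardware ordering. A hypothetical polling loop (flag name invented) where that is exactly what is wanted — force a fresh load on every iteration without pretending to be a CPU barrier:

    #include <urcu/compiler.h>   /* CMM_ACCESS_ONCE(), assuming it lands here as in the hunk above */

    static int stop_requested;

    static void wait_until_stopped(void)
    {
            /* Re-read the flag each iteration; no memory barrier implied. */
            while (!CMM_ACCESS_ONCE(stop_requested))
                    ;       /* a real caller would sleep, poll() or cpu-relax here */
    }
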
diff --git a/urcu/system.h b/urcu/system.h
index e0186066db0975930f4d778c1e45de46246deb5e..86ee6b197997017577bdf413af01b5bb2f9e7ddb 100644
--- a/urcu/system.h
+++ b/urcu/system.h
 /*
  * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come before the load.
  */
-#define _CAA_LOAD_SHARED(p)           CAA_ACCESS_ONCE(p)
+#define _CMM_LOAD_SHARED(p)           CAA_ACCESS_ONCE(p)
 
 /*
  * Load a data from shared memory, doing a cache flush if required.
  */
-#define CAA_LOAD_SHARED(p)                     \
+#define CMM_LOAD_SHARED(p)                     \
        ({                              \
                cmm_smp_rmc();          \
-               _CAA_LOAD_SHARED(p);    \
+               _CMM_LOAD_SHARED(p);    \
        })
 
 /*
  * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should follow the store.
  */
-#define _CAA_STORE_SHARED(x, v)        ({ CAA_ACCESS_ONCE(x) = (v); })
+#define _CMM_STORE_SHARED(x, v)        ({ CAA_ACCESS_ONCE(x) = (v); })
 
 /*
  * Store v into x, where x is located in shared memory. Performs the required
  * cache flush after writing. Returns v.
  */
-#define CAA_STORE_SHARED(x, v)         \
+#define CMM_STORE_SHARED(x, v)         \
        ({                              \
-               typeof(x) _v = _CAA_STORE_SHARED(x, v); \
+               typeof(x) _v = _CMM_STORE_SHARED(x, v); \
                cmm_smp_wmc();          \
                _v;                     \
        })
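
The rename keeps the existing two-level split: the leading-underscore forms are bare volatile accesses, while CMM_LOAD_SHARED()/CMM_STORE_SHARED() add the cmm_smp_rmc()/cmm_smp_wmc() cache-coherency hints. A hypothetical sketch of the pattern _defer_rcu() uses above — fill the slots with the bare form, order with an explicit write barrier, then publish the index with the flushing form; the queue layout and names here are invented for illustration:

    #include <urcu/arch.h>      /* cmm_smp_wmb() */
    #include <urcu/system.h>

    #define SLOTS   16
    #define MASK    (SLOTS - 1)

    static void *slot[SLOTS];
    static unsigned long slot_head;

    static void publish_two(void *a, void *b)
    {
            unsigned long head = slot_head;

            _CMM_STORE_SHARED(slot[head++ & MASK], a);  /* bare volatile stores */
            _CMM_STORE_SHARED(slot[head++ & MASK], b);
            cmm_smp_wmb();                              /* write slot[] before head */
            CMM_STORE_SHARED(slot_head, head);          /* publishing store, adds cmm_smp_wmc() */
    }
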
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index ceb7a179ba5eff5d042257be439a42a2e464ad39..aed513b67b4146f8f50bf52f133de0079611d97c 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -39,7 +39,7 @@ struct __uatomic_dummy {
 };
 #define __hp(x)        ((struct __uatomic_dummy *)(x))
 
-#define _uatomic_set(addr, v)  CAA_STORE_SHARED(*(addr), (v))
+#define _uatomic_set(addr, v)  CMM_STORE_SHARED(*(addr), (v))
 
 /* cmpxchg */
 
diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h
index ced61913c5b0552a1da7580333347902ce0ba6b0..347e73f849bc9a354c7c023ece400b7837ff1048 100644
--- a/urcu/uatomic_generic.h
+++ b/urcu/uatomic_generic.h
@@ -29,11 +29,11 @@ extern "C" {
 #endif
 
 #ifndef uatomic_set
-#define uatomic_set(addr, v)   CAA_STORE_SHARED(*(addr), (v))
+#define uatomic_set(addr, v)   CMM_STORE_SHARED(*(addr), (v))
 #endif
 
 #ifndef uatomic_read
-#define uatomic_read(addr)     CAA_LOAD_SHARED(*(addr))
+#define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
 #endif
 
 #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
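
These generic fallbacks mean that, on architectures without a dedicated implementation, uatomic_set()/uatomic_read() reduce to a CMM_STORE_SHARED()/CMM_LOAD_SHARED() on the dereferenced address. A hypothetical caller — the include path is an assumption (shown here as the modern <urcu/uatomic.h> dispatch header; adjust to whichever uatomic header this tree actually installs):

    #include <urcu/uatomic.h>   /* assumption: substitute the tree's uatomic dispatch header */

    static unsigned long event_count;

    static void reset_counter(void)
    {
            uatomic_set(&event_count, 0);        /* generic fallback: CMM_STORE_SHARED() */
    }

    static unsigned long sample_counter(void)
    {
            return uatomic_read(&event_count);   /* generic fallback: CMM_LOAD_SHARED() */
    }
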
diff --git a/urcu/wfqueue-static.h b/urcu/wfqueue-static.h
index 4839c47a32d43030f4f39bad27b7b888c158a544..30d6e963f321f9c7f836e8ae2f9a0b2d5a7e857d 100644
--- a/urcu/wfqueue-static.h
+++ b/urcu/wfqueue-static.h
@@ -79,7 +79,7 @@ void _cds_wfq_enqueue(struct cds_wfq_queue *q, struct cds_wfq_node *node)
         * that the queue is being appended to. The following store will append
         * "node" to the queue from a dequeuer perspective.
         */
-       CAA_STORE_SHARED(*old_tail, node);
+       CMM_STORE_SHARED(*old_tail, node);
 }
 
 /*
@@ -99,14 +99,14 @@ ___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
        /*
         * Queue is empty if it only contains the dummy node.
         */
-       if (q->head == &q->dummy && CAA_LOAD_SHARED(q->tail) == &q->dummy.next)
+       if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
                return NULL;
        node = q->head;
 
        /*
         * Adaptative busy-looping waiting for enqueuer to complete enqueue.
         */
-       while ((next = CAA_LOAD_SHARED(node->next)) == NULL) {
+       while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
                if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
                        poll(NULL, 0, WFQ_WAIT);        /* Wait for 10ms */
                        attempt = 0;
diff --git a/urcu/wfstack-static.h b/urcu/wfstack-static.h
index cecdde1c6551b42e65ac6d2543755dae08c7ed5a..eed83da334a173fa4e3a0d062c89463473ebfb94 100644
--- a/urcu/wfstack-static.h
+++ b/urcu/wfstack-static.h
@@ -67,7 +67,7 @@ void _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
         * At this point, dequeuers see a NULL node->next, they should busy-wait
         * until node->next is set to old_head.
         */
-       CAA_STORE_SHARED(node->next, old_head);
+       CMM_STORE_SHARED(node->next, old_head);
 }
 
 /*
@@ -80,13 +80,13 @@ ___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
        int attempt = 0;
 
 retry:
-       head = CAA_LOAD_SHARED(s->head);
+       head = CMM_LOAD_SHARED(s->head);
        if (head == CDS_WF_STACK_END)
                return NULL;
        /*
         * Adaptative busy-looping waiting for push to complete.
         */
-       while ((next = CAA_LOAD_SHARED(head->next)) == NULL) {
+       while ((next = CMM_LOAD_SHARED(head->next)) == NULL) {
                if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
                        poll(NULL, 0, CDS_WFS_WAIT);    /* Wait for 10ms */
                        attempt = 0;