Rename all arch primitives with prefix caa_
author    David Goulet <david.goulet@polymtl.ca>
          Thu, 18 Nov 2010 20:46:38 +0000 (15:46 -0500)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Thu, 18 Nov 2010 20:46:38 +0000 (15:46 -0500)
This is the second patch for reducing namespace pollution.
The caa_ prefix stands for Concurrent Architecture Abstraction.
Like the first, it was suggested by Mathieu Desnoyers and Paul E. McKenney.

Every define, macro, and function specific to the architecture
abstraction of liburcu is renamed with that prefix.
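
For downstream code the rename is mechanical: each affected name simply
gains the caa_/CAA_ prefix. A minimal before/after sketch on a
hypothetical caller (the struct and helpers below are illustrations, not
part of this patch; they assume the renamed liburcu headers):

#include <stddef.h>
#include <urcu/compiler.h>	/* caa_container_of(), CAA_BITS_PER_LONG */
#include <urcu/arch.h>		/* CAA_CACHE_LINE_SIZE, caa_cpu_relax() */

struct entry {
	int key;
	int refcount;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));	/* was CACHE_LINE_SIZE */

static struct entry *entry_of(int *ref)
{
	/* was: container_of(ref, struct entry, refcount) */
	return caa_container_of(ref, struct entry, refcount);
}

static void backoff(unsigned long n)
{
	while (n-- != 0)
		caa_cpu_relax();	/* was: cpu_relax() */
}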

Signed-off-by: David Goulet <david.goulet@polymtl.ca>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
43 files changed:
tests/api_gcc.h
tests/api_ppc.h
tests/api_x86.h
tests/rcutorture.h
tests/test_cycles_per_loop.c
tests/test_looplen.c
tests/test_mutex.c
tests/test_perthreadlock.c
tests/test_perthreadlock_timing.c
tests/test_qsbr.c
tests/test_qsbr_gc.c
tests/test_qsbr_timing.c
tests/test_rwlock.c
tests/test_rwlock_timing.c
tests/test_urcu.c
tests/test_urcu_assign.c
tests/test_urcu_bp.c
tests/test_urcu_defer.c
tests/test_urcu_gc.c
tests/test_urcu_lfq.c
tests/test_urcu_lfs.c
tests/test_urcu_timing.c
tests/test_urcu_wfq.c
tests/test_urcu_wfs.c
urcu-bp-static.h
urcu-bp.c
urcu-defer.c
urcu-pointer-static.h
urcu-qsbr-static.h
urcu-qsbr.c
urcu-static.h
urcu.c
urcu/arch_generic.h
urcu/arch_ppc.h
urcu/arch_s390.h
urcu/arch_sparc64.h
urcu/arch_x86.h
urcu/compiler.h
urcu/system.h
urcu/uatomic_arch_x86.h
urcu/uatomic_generic.h
urcu/wfqueue-static.h
urcu/wfstack-static.h

diff --git a/tests/api_gcc.h b/tests/api_gcc.h
index b23110d7594325a70a2f2bffb2591112a2f0aa9e..632bdd5b4ce132132a08592a5b6c2a64dffc9aa5 100644
--- a/tests/api_gcc.h
+++ b/tests/api_gcc.h
@@ -73,7 +73,7 @@
  * Machine parameters.
  */
 
-/* #define CACHE_LINE_SIZE 64 */
+/* #define CAA_CACHE_LINE_SIZE 64 */
 #define ____cacheline_internodealigned_in_smp \
        __attribute__((__aligned__(1 << 6)))
 
@@ -295,9 +295,9 @@ cmpxchg(volatile long *ptr, long oldval, long newval)
  * Default machine parameters.
  */
 
-#ifndef CACHE_LINE_SIZE
-/* #define CACHE_LINE_SIZE 128 */
-#endif /* #ifndef CACHE_LINE_SIZE */
+#ifndef CAA_CACHE_LINE_SIZE
+/* #define CAA_CACHE_LINE_SIZE 128 */
+#endif /* #ifndef CAA_CACHE_LINE_SIZE */
 
 /*
  * Exclusive locking primitives.
@@ -497,7 +497,7 @@ long long get_microseconds(void)
 #define DEFINE_PER_THREAD(type, name) \
        struct { \
                __typeof__(type) v \
-                       __attribute__((__aligned__(CACHE_LINE_SIZE))); \
+                       __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
        } __per_thread_##name[NR_THREADS];
 #define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
 
@@ -531,7 +531,7 @@ long long get_microseconds(void)
 #define DEFINE_PER_CPU(type, name) \
        struct { \
                __typeof__(type) v \
-                       __attribute__((__aligned__(CACHE_LINE_SIZE))); \
+                       __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
        } __per_cpu_##name[NR_CPUS]
 #define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
 
@@ -972,7 +972,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @member:    the name of the list_struct within the struct.
  */
 #define list_entry(ptr, type, member) \
-       container_of(ptr, type, member)
+       caa_container_of(ptr, type, member)
 
 /**
  * list_first_entry - get the first element from a list
@@ -1274,7 +1274,7 @@ static inline void hlist_move_list(struct hlist_head *old,
        old->first = NULL;
 }
 
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+#define hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
diff --git a/tests/api_ppc.h b/tests/api_ppc.h
index 9773500d8049ab6a0120f5282b817eceec5da231..5c5f6e99fbcb6af47bb3f5f962bd4104d1f194e5 100644
--- a/tests/api_ppc.h
+++ b/tests/api_ppc.h
@@ -76,7 +76,7 @@
 
 #define CONFIG_PPC64
 
-/*#define CACHE_LINE_SIZE 128 */
+/*#define CAA_CACHE_LINE_SIZE 128 */
 #define ____cacheline_internodealigned_in_smp \
        __attribute__((__aligned__(1 << 7)))
 
@@ -660,9 +660,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  * Default machine parameters.
  */
 
-#ifndef CACHE_LINE_SIZE
-/* #define CACHE_LINE_SIZE 128 */
-#endif /* #ifndef CACHE_LINE_SIZE */
+#ifndef CAA_CACHE_LINE_SIZE
+/* #define CAA_CACHE_LINE_SIZE 128 */
+#endif /* #ifndef CAA_CACHE_LINE_SIZE */
 
 /*
  * Exclusive locking primitives.
@@ -855,7 +855,7 @@ long long get_microseconds(void)
 #define DEFINE_PER_THREAD(type, name) \
        struct { \
                __typeof__(type) v \
-                       __attribute__((__aligned__(CACHE_LINE_SIZE))); \
+                       __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
        } __per_thread_##name[NR_THREADS];
 #define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
 
@@ -889,7 +889,7 @@ long long get_microseconds(void)
 #define DEFINE_PER_CPU(type, name) \
        struct { \
                __typeof__(type) v \
-                       __attribute__((__aligned__(CACHE_LINE_SIZE))); \
+                       __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
        } __per_cpu_##name[NR_CPUS]
 #define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
 
@@ -1331,7 +1331,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @member:    the name of the list_struct within the struct.
  */
 #define list_entry(ptr, type, member) \
-       container_of(ptr, type, member)
+       caa_container_of(ptr, type, member)
 
 /**
  * list_first_entry - get the first element from a list
@@ -1633,7 +1633,7 @@ static inline void hlist_move_list(struct hlist_head *old,
        old->first = NULL;
 }
 
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+#define hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
diff --git a/tests/api_x86.h b/tests/api_x86.h
index fe00a3599ab9a9e657e9daa6bb91cad8bc4ffac7..03654329a670ca8e72d197f83dd83bfaadc3e3b7 100644
--- a/tests/api_x86.h
+++ b/tests/api_x86.h
@@ -76,7 +76,7 @@
  * Machine parameters.
  */
 
-/* #define CACHE_LINE_SIZE 64 */
+/* #define CAA_CACHE_LINE_SIZE 64 */
 #define ____cacheline_internodealigned_in_smp \
        __attribute__((__aligned__(1 << 6)))
 
@@ -356,9 +356,9 @@ __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
  * Default machine parameters.
  */
 
-#ifndef CACHE_LINE_SIZE
-/* #define CACHE_LINE_SIZE 128 */
-#endif /* #ifndef CACHE_LINE_SIZE */
+#ifndef CAA_CACHE_LINE_SIZE
+/* #define CAA_CACHE_LINE_SIZE 128 */
+#endif /* #ifndef CAA_CACHE_LINE_SIZE */
 
 /*
  * Exclusive locking primitives.
@@ -558,7 +558,7 @@ long long get_microseconds(void)
 #define DEFINE_PER_THREAD(type, name) \
        struct { \
                __typeof__(type) v \
-                       __attribute__((__aligned__(CACHE_LINE_SIZE))); \
+                       __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
        } __per_thread_##name[NR_THREADS];
 #define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
 
@@ -592,7 +592,7 @@ long long get_microseconds(void)
 #define DEFINE_PER_CPU(type, name) \
        struct { \
                __typeof__(type) v \
-                       __attribute__((__aligned__(CACHE_LINE_SIZE))); \
+                       __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
        } __per_cpu_##name[NR_CPUS]
 #define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
 
@@ -1034,7 +1034,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @member:    the name of the list_struct within the struct.
  */
 #define list_entry(ptr, type, member) \
-       container_of(ptr, type, member)
+       caa_container_of(ptr, type, member)
 
 /**
  * list_first_entry - get the first element from a list
@@ -1336,7 +1336,7 @@ static inline void hlist_move_list(struct hlist_head *old,
        old->first = NULL;
 }
 
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+#define hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
diff --git a/tests/rcutorture.h b/tests/rcutorture.h
index 4192bd0643f02415c64fc2d1fd3d08b71198f85f..4dac2f224faa9605f309195bea5e62e818080da0 100644
--- a/tests/rcutorture.h
+++ b/tests/rcutorture.h
@@ -77,7 +77,7 @@ char argsbuf[64];
 #define GOFLAG_RUN  1
 #define GOFLAG_STOP 2
 
-int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT;
+int goflag __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))) = GOFLAG_INIT;
 
 #define RCU_READ_RUN 1000
 
diff --git a/tests/test_cycles_per_loop.c b/tests/test_cycles_per_loop.c
index 64b160b775ca63233569c9b680f8997dcb8bbc45..05a6ea72cf1c9943ae33a027c792731fee8bcc37 100644
--- a/tests/test_cycles_per_loop.c
+++ b/tests/test_cycles_per_loop.c
@@ -6,16 +6,16 @@
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 int main()
 {
        cycles_t time1, time2;
 
-       time1 = get_cycles();
+       time1 = caa_get_cycles();
        loop_sleep(NR_LOOPS);
-       time2 = get_cycles();
+       time2 = caa_get_cycles();
        printf("CPU clock cycles per loop: %g\n", (time2 - time1) /
                                                  (double)NR_LOOPS);
 }
diff --git a/tests/test_looplen.c b/tests/test_looplen.c
index 53d39cd9edb6d559bebeb9794b0599591154cfa8..01394e0520292a892673d7d67f07210d4acd29e3 100644
--- a/tests/test_looplen.c
+++ b/tests/test_looplen.c
@@ -61,7 +61,7 @@ static inline pid_t gettid(void)
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 #define LOOPS 1048576
@@ -75,9 +75,9 @@ int main(int argc, char **argv)
        double cpl;
 
        for (i = 0; i < TESTS; i++) {
-               time1 = get_cycles();
+               time1 = caa_get_cycles();
                loop_sleep(LOOPS);
-               time2 = get_cycles();
+               time2 = caa_get_cycles();
                time_tot += time2 - time1;
        }
        cpl = ((double)time_tot) / (double)TESTS / (double)LOOPS;
diff --git a/tests/test_mutex.c b/tests/test_mutex.c
index 7dd1e3173fab465f23ece128645012bad7ecf8af..747bcc442dec2a63a507b0bf0be075fe5ee024b7 100644
--- a/tests/test_mutex.c
+++ b/tests/test_mutex.c
@@ -85,7 +85,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -156,9 +156,9 @@ static unsigned long long __thread nr_writes;
 static unsigned long long __thread nr_reads;
 
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_writes;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_reads;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_reads;
 
 static unsigned int nr_readers;
 static unsigned int nr_writers;
diff --git a/tests/test_perthreadlock.c b/tests/test_perthreadlock.c
index 9de8ced9ff1747e26c5f6874f27adef73017e444..c233724299d07fab562771ae6bb3eaa74dc57706 100644
--- a/tests/test_perthreadlock.c
+++ b/tests/test_perthreadlock.c
@@ -68,7 +68,7 @@ struct test_array {
 
 struct per_thread_lock {
        pthread_mutex_t lock;
-} __attribute__((aligned(CACHE_LINE_SIZE)));   /* cache-line aligned */
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));       /* cache-line aligned */
 
 static struct per_thread_lock *per_thread_lock;
 
@@ -89,7 +89,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -160,9 +160,9 @@ static unsigned long long __thread nr_writes;
 static unsigned long long __thread nr_reads;
 
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_writes;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_reads;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_reads;
 
 static unsigned int nr_readers;
 static unsigned int nr_writers;
diff --git a/tests/test_perthreadlock_timing.c b/tests/test_perthreadlock_timing.c
index 9eb1c1357b65c84fe833a16a1a1c991889d3ebc6..26b642af1cbca9a17b012ea57ac852c360243aa3 100644
--- a/tests/test_perthreadlock_timing.c
+++ b/tests/test_perthreadlock_timing.c
@@ -60,7 +60,7 @@ static struct test_array test_array = { 8 };
 
 struct per_thread_lock {
        pthread_mutex_t lock;
-} __attribute__((aligned(CACHE_LINE_SIZE)));   /* cache-line aligned */
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));       /* cache-line aligned */
 
 static struct per_thread_lock *per_thread_lock;
 
@@ -78,8 +78,8 @@ static int num_write;
 #define NR_READ num_read
 #define NR_WRITE num_write
 
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *reader_time;
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *writer_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *reader_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *writer_time;
 
 void *thr_reader(void *arg)
 {
@@ -91,7 +91,7 @@ void *thr_reader(void *arg)
                        "reader", pthread_self(), (unsigned long)gettid());
        sleep(2);
 
-       time1 = get_cycles();
+       time1 = caa_get_cycles();
        for (i = 0; i < OUTER_READ_LOOP; i++) {
                for (j = 0; j < INNER_READ_LOOP; j++) {
                        pthread_mutex_lock(&per_thread_lock[tidx].lock);
@@ -99,7 +99,7 @@ void *thr_reader(void *arg)
                        pthread_mutex_unlock(&per_thread_lock[tidx].lock);
                }
        }
-       time2 = get_cycles();
+       time2 = caa_get_cycles();
 
        reader_time[tidx] = time2 - time1;
 
@@ -122,7 +122,7 @@ void *thr_writer(void *arg)
 
        for (i = 0; i < OUTER_WRITE_LOOP; i++) {
                for (j = 0; j < INNER_WRITE_LOOP; j++) {
-                       time1 = get_cycles();
+                       time1 = caa_get_cycles();
                        for (tidx = 0; tidx < NR_READ; tidx++) {
                                pthread_mutex_lock(&per_thread_lock[tidx].lock);
                        }
@@ -130,7 +130,7 @@ void *thr_writer(void *arg)
                        for (tidx = NR_READ - 1; tidx >= 0; tidx--) {
                                pthread_mutex_unlock(&per_thread_lock[tidx].lock);
                        }
-                       time2 = get_cycles();
+                       time2 = caa_get_cycles();
                        writer_time[(unsigned long)arg] += time2 - time1;
                        usleep(1);
                }
diff --git a/tests/test_qsbr.c b/tests/test_qsbr.c
index 4f19c72aa3c7d0bdb292d55a3525f0fcea458077..1ef8c265895025a51021bf005b25eedeae546f98 100644
--- a/tests/test_qsbr.c
+++ b/tests/test_qsbr.c
@@ -83,7 +83,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_qsbr_gc.c b/tests/test_qsbr_gc.c
index 875fd36c487113c58b37aa6909c6b64c9c835238..c9b3f4a8d78d25776a3267d7b6fa91c264b15801 100644
--- a/tests/test_qsbr_gc.c
+++ b/tests/test_qsbr_gc.c
@@ -88,7 +88,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -164,7 +164,7 @@ static unsigned int nr_writers;
 
 pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_writes;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
 
 
 void rcu_copy_mutex_lock(void)
@@ -419,9 +419,9 @@ int main(int argc, char **argv)
        tot_nr_writes = malloc(sizeof(*tot_nr_writes) * nr_writers);
        pending_reclaims = malloc(sizeof(*pending_reclaims) * nr_writers);
        if (reclaim_batch * sizeof(*pending_reclaims[i].queue)
-                       < CACHE_LINE_SIZE)
+                       < CAA_CACHE_LINE_SIZE)
                for (i = 0; i < nr_writers; i++)
-                       pending_reclaims[i].queue = calloc(1, CACHE_LINE_SIZE);
+                       pending_reclaims[i].queue = calloc(1, CAA_CACHE_LINE_SIZE);
        else
                for (i = 0; i < nr_writers; i++)
                        pending_reclaims[i].queue = calloc(reclaim_batch,
diff --git a/tests/test_qsbr_timing.c b/tests/test_qsbr_timing.c
index 69193bff72dbea41cde65f528dcad9372bead384..f9f450445882c0fb462b082989a730fe71bb6f52 100644
--- a/tests/test_qsbr_timing.c
+++ b/tests/test_qsbr_timing.c
@@ -95,8 +95,8 @@ static int num_write;
 #define NR_READ num_read
 #define NR_WRITE num_write
 
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *reader_time;
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *writer_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *reader_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *writer_time;
 
 void *thr_reader(void *arg)
 {
@@ -110,7 +110,7 @@ void *thr_reader(void *arg)
 
        rcu_register_thread();
 
-       time1 = get_cycles();
+       time1 = caa_get_cycles();
        for (i = 0; i < OUTER_READ_LOOP; i++) {
                for (j = 0; j < INNER_READ_LOOP; j++) {
                        _rcu_read_lock();
@@ -122,7 +122,7 @@ void *thr_reader(void *arg)
                }
                _rcu_quiescent_state();
        }
-       time2 = get_cycles();
+       time2 = caa_get_cycles();
 
        rcu_unregister_thread();
 
@@ -147,7 +147,7 @@ void *thr_writer(void *arg)
 
        for (i = 0; i < OUTER_WRITE_LOOP; i++) {
                for (j = 0; j < INNER_WRITE_LOOP; j++) {
-                       time1 = get_cycles();
+                       time1 = caa_get_cycles();
                        new = malloc(sizeof(struct test_array));
                        rcu_copy_mutex_lock();
                        old = test_rcu_pointer;
@@ -163,7 +163,7 @@ void *thr_writer(void *arg)
                                old->a = 0;
                        }
                        free(old);
-                       time2 = get_cycles();
+                       time2 = caa_get_cycles();
                        writer_time[(unsigned long)arg] += time2 - time1;
                        usleep(1);
                }
diff --git a/tests/test_rwlock.c b/tests/test_rwlock.c
index 445ce958bb464a6383aa2cc8d11817e361341133..deca53bd75f437539cf9a44131002293b3d96e00 100644
--- a/tests/test_rwlock.c
+++ b/tests/test_rwlock.c
@@ -85,7 +85,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_rwlock_timing.c b/tests/test_rwlock_timing.c
index 7d1680606d20373515ca0224193582a6c737a4b2..96269c08b46a580ce65a7acb0cd523d1b75873e0 100644
--- a/tests/test_rwlock_timing.c
+++ b/tests/test_rwlock_timing.c
@@ -75,8 +75,8 @@ static int num_write;
 #define NR_READ num_read
 #define NR_WRITE num_write
 
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *reader_time;
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *writer_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *reader_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *writer_time;
 
 void *thr_reader(void *arg)
 {
@@ -87,7 +87,7 @@ void *thr_reader(void *arg)
                        "reader", pthread_self(), (unsigned long)gettid());
        sleep(2);
 
-       time1 = get_cycles();
+       time1 = caa_get_cycles();
        for (i = 0; i < OUTER_READ_LOOP; i++) {
                for (j = 0; j < INNER_READ_LOOP; j++) {
                        pthread_rwlock_rdlock(&lock);
@@ -95,7 +95,7 @@ void *thr_reader(void *arg)
                        pthread_rwlock_unlock(&lock);
                }
        }
-       time2 = get_cycles();
+       time2 = caa_get_cycles();
 
        reader_time[(unsigned long)arg] = time2 - time1;
 
@@ -117,11 +117,11 @@ void *thr_writer(void *arg)
 
        for (i = 0; i < OUTER_WRITE_LOOP; i++) {
                for (j = 0; j < INNER_WRITE_LOOP; j++) {
-                       time1 = get_cycles();
+                       time1 = caa_get_cycles();
                        pthread_rwlock_wrlock(&lock);
                        test_array.a = 8;
                        pthread_rwlock_unlock(&lock);
-                       time2 = get_cycles();
+                       time2 = caa_get_cycles();
                        writer_time[(unsigned long)arg] += time2 - time1;
                        usleep(1);
                }
diff --git a/tests/test_urcu.c b/tests/test_urcu.c
index eeea7f55e2253853323525ad94076a070390b14e..e6a648981ed8b6b5a0abb679771d98d5e695dd9b 100644
--- a/tests/test_urcu.c
+++ b/tests/test_urcu.c
@@ -83,7 +83,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_urcu_assign.c b/tests/test_urcu_assign.c
index 481cdd671a1a31ac5fea8213aa900c9e5ff6b8c3..24a704b242583de6f6894b4d534426124d4f9aa9 100644
--- a/tests/test_urcu_assign.c
+++ b/tests/test_urcu_assign.c
@@ -83,7 +83,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c
index e8318241cc62e1c4d574834ee95b719c05ca8c17..7f20a6a060233c4795ae5ed01d0a9aef4891c748 100644
--- a/tests/test_urcu_bp.c
+++ b/tests/test_urcu_bp.c
@@ -83,7 +83,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_urcu_defer.c b/tests/test_urcu_defer.c
index 2cbb0416739a44aaf6d8dbd9ae2523ffbe843816..c333964ce9a998c094c5c831476189acce791409 100644
--- a/tests/test_urcu_defer.c
+++ b/tests/test_urcu_defer.c
@@ -84,7 +84,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -156,7 +156,7 @@ static unsigned long long __thread nr_writes;
 static unsigned long long __thread nr_reads;
 
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_writes;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
 
 static unsigned int nr_readers;
 static unsigned int nr_writers;
diff --git a/tests/test_urcu_gc.c b/tests/test_urcu_gc.c
index ddafb871536ff1b76f1c69a703f4852a9bb38509..d0f7e6e8e3f2466269c295b3394eb08fa16a4d37 100644
--- a/tests/test_urcu_gc.c
+++ b/tests/test_urcu_gc.c
@@ -92,7 +92,7 @@ static unsigned long wduration;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -164,7 +164,7 @@ static unsigned long long __thread nr_writes;
 static unsigned long long __thread nr_reads;
 
 static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_writes;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
 
 static unsigned int nr_readers;
 static unsigned int nr_writers;
@@ -419,9 +419,9 @@ int main(int argc, char **argv)
        tot_nr_writes = malloc(sizeof(*tot_nr_writes) * nr_writers);
        pending_reclaims = malloc(sizeof(*pending_reclaims) * nr_writers);
        if (reclaim_batch * sizeof(*pending_reclaims[i].queue)
-                       < CACHE_LINE_SIZE)
+                       < CAA_CACHE_LINE_SIZE)
                for (i = 0; i < nr_writers; i++)
-                       pending_reclaims[i].queue = calloc(1, CACHE_LINE_SIZE);
+                       pending_reclaims[i].queue = calloc(1, CAA_CACHE_LINE_SIZE);
        else
                for (i = 0; i < nr_writers; i++)
                        pending_reclaims[i].queue = calloc(reclaim_batch,
diff --git a/tests/test_urcu_lfq.c b/tests/test_urcu_lfq.c
index 901bcaec9c7dc2602c578276fe589eb33d9c5141..aed9be0b5214b2be94890dbace9b96dc341b3ad6 100644
--- a/tests/test_urcu_lfq.c
+++ b/tests/test_urcu_lfq.c
@@ -77,7 +77,7 @@ static unsigned long wdelay;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
@@ -202,7 +202,7 @@ fail:
 
 static void rcu_release_node(struct urcu_ref *ref)
 {
-       struct rcu_lfq_node *node = container_of(ref, struct rcu_lfq_node, ref);
+       struct rcu_lfq_node *node = caa_container_of(ref, struct rcu_lfq_node, ref);
        defer_rcu(free, node);
        //synchronize_rcu();
        //free(node);
@@ -255,7 +255,7 @@ void *thr_dequeuer(void *_count)
 
 static void release_node(struct urcu_ref *ref)
 {
-       struct rcu_lfq_node *node = container_of(ref, struct rcu_lfq_node, ref);
+       struct rcu_lfq_node *node = caa_container_of(ref, struct rcu_lfq_node, ref);
        free(node);
 }
 
diff --git a/tests/test_urcu_lfs.c b/tests/test_urcu_lfs.c
index 8249ebaeb05295cb43435cf3869a6ebfc74d9457..02be2d4d61a75e7248e4d80f79df6066f65e76db 100644
--- a/tests/test_urcu_lfs.c
+++ b/tests/test_urcu_lfs.c
@@ -77,7 +77,7 @@ static unsigned long wdelay;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_urcu_timing.c b/tests/test_urcu_timing.c
index 94ac0ae4dff32043d45f7bab97d629665d4932aa..8e1a8104f6ee649409537edbfc9cab7f97798d00 100644
--- a/tests/test_urcu_timing.c
+++ b/tests/test_urcu_timing.c
@@ -94,8 +94,8 @@ static int num_write;
 #define NR_READ num_read
 #define NR_WRITE num_write
 
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *reader_time;
-static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *writer_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *reader_time;
+static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *writer_time;
 
 void *thr_reader(void *arg)
 {
@@ -109,7 +109,7 @@ void *thr_reader(void *arg)
 
        rcu_register_thread();
 
-       time1 = get_cycles();
+       time1 = caa_get_cycles();
        for (i = 0; i < OUTER_READ_LOOP; i++) {
                for (j = 0; j < INNER_READ_LOOP; j++) {
                        rcu_read_lock();
@@ -120,7 +120,7 @@ void *thr_reader(void *arg)
                        rcu_read_unlock();
                }
        }
-       time2 = get_cycles();
+       time2 = caa_get_cycles();
 
        rcu_unregister_thread();
 
@@ -145,7 +145,7 @@ void *thr_writer(void *arg)
 
        for (i = 0; i < OUTER_WRITE_LOOP; i++) {
                for (j = 0; j < INNER_WRITE_LOOP; j++) {
-                       time1 = get_cycles();
+                       time1 = caa_get_cycles();
                        new = malloc(sizeof(struct test_array));
                        rcu_copy_mutex_lock();
                        old = test_rcu_pointer;
@@ -161,7 +161,7 @@ void *thr_writer(void *arg)
                                old->a = 0;
                        }
                        free(old);
-                       time2 = get_cycles();
+                       time2 = caa_get_cycles();
                        writer_time[(unsigned long)arg] += time2 - time1;
                        usleep(1);
                }
diff --git a/tests/test_urcu_wfq.c b/tests/test_urcu_wfq.c
index d446e47bb763d13d29350b06ed135866582e47dc..cb49454e7b06d995f77af55321fc55a385180097 100644
--- a/tests/test_urcu_wfq.c
+++ b/tests/test_urcu_wfq.c
@@ -76,7 +76,7 @@ static unsigned long wdelay;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/tests/test_urcu_wfs.c b/tests/test_urcu_wfs.c
index 294e95530c5444ce40fef2b65bf220cdc6e33640..49f92184e6a1b53351e9946d28d20478e7548cc8 100644
--- a/tests/test_urcu_wfs.c
+++ b/tests/test_urcu_wfs.c
@@ -76,7 +76,7 @@ static unsigned long wdelay;
 static inline void loop_sleep(unsigned long l)
 {
        while(l-- != 0)
-               cpu_relax();
+               caa_cpu_relax();
 }
 
 static int verbose_mode;
diff --git a/urcu-bp-static.h b/urcu-bp-static.h
index 394476b232d5aff5e52aeb1c283627e1249c1f4a..ea2c3765e485fa9b86e7ad261a1f1411d676ad4c 100644
--- a/urcu-bp-static.h
+++ b/urcu-bp-static.h
@@ -140,7 +140,7 @@ struct rcu_reader {
        /* Data used by both reader and synchronize_rcu() */
        long ctr;
        /* Data used for registry */
-       struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
+       struct list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
        pthread_t tid;
        int alloc;      /* registry entry allocated */
 };
@@ -162,7 +162,7 @@ static inline int rcu_old_gp_ongoing(long *value)
         * Make sure both tests below are done on the same version of *value
         * to insure consistency.
         */
-       v = LOAD_SHARED(*value);
+       v = CAA_LOAD_SHARED(*value);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
@@ -182,14 +182,14 @@ static inline void _rcu_read_lock(void)
         *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-               _STORE_SHARED(rcu_reader->ctr, _LOAD_SHARED(rcu_gp_ctr));
+               _CAA_STORE_SHARED(rcu_reader->ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer.
                 */
                cmm_smp_mb();
        } else {
-               _STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+               _CAA_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
        }
 }
 
@@ -199,7 +199,7 @@ static inline void _rcu_read_unlock(void)
         * Finish using rcu before decrementing the pointer.
         */
        cmm_smp_mb();
-       _STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+       _CAA_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
 
diff --git a/urcu-bp.c b/urcu-bp.c
index 33352c2913d876b6d8ab5d556c2bfff5554807a9..b457d2bc8f6e49fee5b55565107219aaf02d6ee9 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -123,13 +123,13 @@ void update_counter_and_wait(void)
        struct rcu_reader *index, *tmp;
 
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
 
        /*
         * Must commit qparity update to memory before waiting for other parity
         * quiescent state. Failure to do so could result in the writer waiting
         * forever while new readers are always accessing data (no progress).
-        * Ensured by STORE_SHARED and LOAD_SHARED.
+        * Ensured by CAA_STORE_SHARED and CAA_LOAD_SHARED.
         */
 
        /*
@@ -155,7 +155,7 @@ void update_counter_and_wait(void)
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
                                usleep(RCU_SLEEP_DELAY);
                        else
-                               cpu_relax();
+                               caa_cpu_relax();
                }
        }
        /* put back the reader list in the registry */
diff --git a/urcu-defer.c b/urcu-defer.c
index 6dc08a383f0f310ee0d71358242ed3a1bf081a20..c28e8488354e4138f24114200258eb3abdf7f3d3 100644
--- a/urcu-defer.c
+++ b/urcu-defer.c
@@ -110,7 +110,7 @@ static unsigned long rcu_defer_num_callbacks(void)
 
        mutex_lock(&rcu_defer_mutex);
        list_for_each_entry(index, &registry, list) {
-               head = LOAD_SHARED(index->head);
+               head = CAA_LOAD_SHARED(index->head);
                num_items += head - index->tail;
        }
        mutex_unlock(&rcu_defer_mutex);
@@ -153,21 +153,21 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
 
        for (i = queue->tail; i != head;) {
                cmm_smp_rmb();       /* read head before q[]. */
-               p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+               p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                if (unlikely(DQ_IS_FCT_BIT(p))) {
                        DQ_CLEAR_FCT_BIT(p);
                        queue->last_fct_out = p;
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                } else if (unlikely(p == DQ_FCT_MARK)) {
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                        queue->last_fct_out = p;
-                       p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+                       p = CAA_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
                }
                fct = queue->last_fct_out;
                fct(p);
        }
        cmm_smp_mb();   /* push tail after having used q[] */
-       STORE_SHARED(queue->tail, i);
+       CAA_STORE_SHARED(queue->tail, i);
 }
 
 static void _rcu_defer_barrier_thread(void)
@@ -212,7 +212,7 @@ void rcu_defer_barrier(void)
 
        mutex_lock(&rcu_defer_mutex);
        list_for_each_entry(index, &registry, list) {
-               index->last_head = LOAD_SHARED(index->head);
+               index->last_head = CAA_LOAD_SHARED(index->head);
                num_items += index->last_head - index->tail;
        }
        if (likely(!num_items)) {
@@ -241,7 +241,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
         * thread.
         */
        head = defer_queue.head;
-       tail = LOAD_SHARED(defer_queue.tail);
+       tail = CAA_LOAD_SHARED(defer_queue.tail);
 
        /*
         * If queue is full, or reached threshold. Empty queue ourself.
@@ -250,7 +250,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
        if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
                assert(head - tail <= DEFER_QUEUE_SIZE);
                rcu_defer_barrier_thread();
-               assert(head - LOAD_SHARED(defer_queue.tail) == 0);
+               assert(head - CAA_LOAD_SHARED(defer_queue.tail) == 0);
        }
 
        if (unlikely(defer_queue.last_fct_in != fct)) {
@@ -261,13 +261,13 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                         * marker, write DQ_FCT_MARK followed by the function
                         * pointer.
                         */
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                } else {
                        DQ_SET_FCT_BIT(fct);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        } else {
@@ -276,16 +276,16 @@ void _defer_rcu(void (*fct)(void *p), void *p)
                         * If the data to encode is not aligned or the marker,
                         * write DQ_FCT_MARK followed by the function pointer.
                         */
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      DQ_FCT_MARK);
-                       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+                       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
                                      fct);
                }
        }
-       _STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+       _CAA_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
        cmm_smp_wmb();  /* Publish new pointer before head */
                        /* Write q[] before head. */
-       STORE_SHARED(defer_queue.head, head);
+       CAA_STORE_SHARED(defer_queue.head, head);
        cmm_smp_mb();   /* Write queue head before read futex */
        /*
         * Wake-up any waiting defer thread.
diff --git a/urcu-pointer-static.h b/urcu-pointer-static.h
index c8ac7f0c0861a2d80e20e935dddf99303693fa75..5a1e0e4d12957fe984e6050aeabb06427425e264 100644
--- a/urcu-pointer-static.h
+++ b/urcu-pointer-static.h
@@ -49,7 +49,7 @@ extern "C" {
  * Inserts memory barriers on architectures that require them (currently only
  * Alpha) and documents which pointers are protected by RCU.
  *
- * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
+ * The compiler memory barrier in CAA_LOAD_SHARED() ensures that value-speculative
  * optimizations (e.g. VSS: Value Speculation Scheduling) does not perform the
  * data read before the pointer read by speculating the value of the pointer.
  * Correct ordering is ensured because the pointer is read as a volatile access.
@@ -62,7 +62,7 @@ extern "C" {
  */
 
 #define _rcu_dereference(p)     ({                                     \
-                               typeof(p) _________p1 = LOAD_SHARED(p); \
+                               typeof(p) _________p1 = CAA_LOAD_SHARED(p); \
                                cmm_smp_read_barrier_depends();         \
                                (_________p1);                          \
                                })
diff --git a/urcu-qsbr-static.h b/urcu-qsbr-static.h
index 108ef6e7181d58409b2314d0ea0551b211f8e76a..ab4dcb5ff1336503b26e35917b24ee249e178ede 100644
--- a/urcu-qsbr-static.h
+++ b/urcu-qsbr-static.h
@@ -135,7 +135,7 @@ struct rcu_reader {
        /* Data used by both reader and synchronize_rcu() */
        unsigned long ctr;
        /* Data used for registry */
-       struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
+       struct list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
        pthread_t tid;
 };
 
@@ -159,7 +159,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
 {
        unsigned long v;
 
-       v = LOAD_SHARED(*ctr);
+       v = CAA_LOAD_SHARED(*ctr);
        return v && (v != rcu_gp_ctr);
 }
 
@@ -175,7 +175,7 @@ static inline void _rcu_read_unlock(void)
 static inline void _rcu_quiescent_state(void)
 {
        cmm_smp_mb();
-       _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
+       _CAA_STORE_SHARED(rcu_reader.ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();   /* write rcu_reader.ctr before read futex */
        wake_up_gp();
        cmm_smp_mb();
@@ -184,7 +184,7 @@ static inline void _rcu_quiescent_state(void)
 static inline void _rcu_thread_offline(void)
 {
        cmm_smp_mb();
-       STORE_SHARED(rcu_reader.ctr, 0);
+       CAA_STORE_SHARED(rcu_reader.ctr, 0);
        cmm_smp_mb();   /* write rcu_reader.ctr before read futex */
        wake_up_gp();
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
@@ -193,7 +193,7 @@ static inline void _rcu_thread_offline(void)
 static inline void _rcu_thread_online(void)
 {
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
-       _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
+       _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();
 }
 
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 2cf73d5d5be30f62771a62152623fb81b5798133..51d34c7b34ea6bf64846b628885f584e4019e2db 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -114,10 +114,10 @@ static void update_counter_and_wait(void)
 
 #if (BITS_PER_LONG < 64)
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
 #else  /* !(BITS_PER_LONG < 64) */
        /* Increment current G.P. */
-       STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
 #endif /* !(BITS_PER_LONG < 64) */
 
        /*
@@ -163,7 +163,7 @@ static void update_counter_and_wait(void)
                                wait_gp();
                        } else {
 #ifndef HAS_INCOHERENT_CACHES
-                               cpu_relax();
+                               caa_cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
                                cmm_smp_mb();
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
@@ -198,7 +198,7 @@ void synchronize_rcu(void)
         * threads registered as readers.
         */
        if (was_online)
-               STORE_SHARED(rcu_reader.ctr, 0);
+               CAA_STORE_SHARED(rcu_reader.ctr, 0);
 
        mutex_lock(&rcu_gp_lock);
 
@@ -238,7 +238,7 @@ out:
         * freed.
         */
        if (was_online)
-               _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
+               _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();
 }
 #else /* !(BITS_PER_LONG < 64) */
@@ -255,7 +255,7 @@ void synchronize_rcu(void)
         */
        cmm_smp_mb();
        if (was_online)
-               STORE_SHARED(rcu_reader.ctr, 0);
+               CAA_STORE_SHARED(rcu_reader.ctr, 0);
 
        mutex_lock(&rcu_gp_lock);
        if (list_empty(&registry))
@@ -265,7 +265,7 @@ out:
        mutex_unlock(&rcu_gp_lock);
 
        if (was_online)
-               _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
+               _CAA_STORE_SHARED(rcu_reader.ctr, CAA_LOAD_SHARED(rcu_gp_ctr));
        cmm_smp_mb();
 }
 #endif  /* !(BITS_PER_LONG < 64) */
diff --git a/urcu-static.h b/urcu-static.h
index ad415ac7e86d5c686dc49ddb3f464ad8a24f8178..46fe64a902c177f3af572732f115f056d7aef1dc 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -222,7 +222,7 @@ struct rcu_reader {
        unsigned long ctr;
        char need_mb;
        /* Data used for registry */
-       struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
+       struct list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
        pthread_t tid;
 };
 
@@ -250,7 +250,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
         * Make sure both tests below are done on the same version of *value
         * to insure consistency.
         */
-       v = LOAD_SHARED(*ctr);
+       v = CAA_LOAD_SHARED(*ctr);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                 ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 }
@@ -266,14 +266,14 @@ static inline void _rcu_read_lock(void)
         *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
-               _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
+               _CAA_STORE_SHARED(rcu_reader.ctr, _CAA_LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer. See smp_mb_master().
                 */
                smp_mb_slave(RCU_MB_GROUP);
        } else {
-               _STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+               _CAA_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
        }
 }
 
@@ -288,12 +288,12 @@ static inline void _rcu_read_unlock(void)
         */
        if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
                smp_mb_slave(RCU_MB_GROUP);
-               _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+               _CAA_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
                /* write rcu_reader.ctr before read futex */
                smp_mb_slave(RCU_MB_GROUP);
                wake_up_gp();
        } else {
-               _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+               _CAA_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
        }
        cmm_barrier();  /* Ensure the compiler does not reorder us with mutex */
 }
diff --git a/urcu.c b/urcu.c
index 9c556aa6bd9d3eeff75ce2c8b857bfb59ca4596b..b4a5a7db948b8d57a03960747c5257d9631b1c7c 100644
--- a/urcu.c
+++ b/urcu.c
@@ -99,9 +99,9 @@ static void mutex_lock(pthread_mutex_t *mutex)
                        perror("Error in pthread mutex lock");
                        exit(-1);
                }
-               if (LOAD_SHARED(rcu_reader.need_mb)) {
+               if (CAA_LOAD_SHARED(rcu_reader.need_mb)) {
                        cmm_smp_mb();
-                       _STORE_SHARED(rcu_reader.need_mb, 0);
+                       _CAA_STORE_SHARED(rcu_reader.need_mb, 0);
                        cmm_smp_mb();
                }
                poll(NULL,0,10);
@@ -155,7 +155,7 @@ static void force_mb_all_readers(void)
         * cache flush is enforced.
         */
        list_for_each_entry(index, &registry, node) {
-               STORE_SHARED(index->need_mb, 1);
+               CAA_STORE_SHARED(index->need_mb, 1);
                pthread_kill(index->tid, SIGRCU);
        }
        /*
@@ -172,7 +172,7 @@ static void force_mb_all_readers(void)
         * the Linux Test Project (LTP).
         */
        list_for_each_entry(index, &registry, node) {
-               while (LOAD_SHARED(index->need_mb)) {
+               while (CAA_LOAD_SHARED(index->need_mb)) {
                        pthread_kill(index->tid, SIGRCU);
                        poll(NULL, 0, 1);
                }
@@ -205,7 +205,7 @@ void update_counter_and_wait(void)
        struct rcu_reader *index, *tmp;
 
        /* Switch parity: 0 -> 1, 1 -> 0 */
-       STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+       CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
 
        /*
         * Must commit rcu_gp_ctr update to memory before waiting for quiescent
@@ -251,7 +251,7 @@ void update_counter_and_wait(void)
                        if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
                                wait_gp();
                        else
-                               cpu_relax();
+                               caa_cpu_relax();
                }
 #else /* #ifndef HAS_INCOHERENT_CACHES */
                /*
@@ -275,7 +275,7 @@ void update_counter_and_wait(void)
                                wait_loops = 0;
                                break; /* only escape switch */
                        default:
-                               cpu_relax();
+                               caa_cpu_relax();
                        }
                }
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
@@ -384,7 +384,7 @@ static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
         * executed on.
         */
        cmm_smp_mb();
-       _STORE_SHARED(rcu_reader.need_mb, 0);
+       _CAA_STORE_SHARED(rcu_reader.need_mb, 0);
        cmm_smp_mb();
 }
 
diff --git a/urcu/arch_generic.h b/urcu/arch_generic.h
index 25a9f7ac581b971a1b5d62a8b6363e2bad499075..100d3c6c12948a2f88bc769f0dc26325a1036027 100644
--- a/urcu/arch_generic.h
+++ b/urcu/arch_generic.h
@@ -28,8 +28,8 @@
 extern "C" {
 #endif
 
-#ifndef CACHE_LINE_SIZE
-#define CACHE_LINE_SIZE        64
+#ifndef CAA_CACHE_LINE_SIZE
+#define CAA_CACHE_LINE_SIZE    64
 #endif
 
 #if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
@@ -117,8 +117,8 @@ extern "C" {
 #define cmm_smp_read_barrier_depends()
 #endif
 
-#ifndef cpu_relax
-#define cpu_relax()            cmm_barrier()
+#ifndef caa_cpu_relax
+#define caa_cpu_relax()                cmm_barrier()
 #endif
 
 #ifdef __cplusplus
diff --git a/urcu/arch_ppc.h b/urcu/arch_ppc.h
index 93aed2a72f0c06b0f4a0cc68c0365f30ca150017..e1a827044aa424ead1d48510eb6b90a7e598b1a6 100644
--- a/urcu/arch_ppc.h
+++ b/urcu/arch_ppc.h
@@ -30,7 +30,7 @@ extern "C" {
 #endif 
 
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
-#define CACHE_LINE_SIZE        256
+#define CAA_CACHE_LINE_SIZE    256
 
 #define cmm_mb()    asm volatile("sync":::"memory")
 
diff --git a/urcu/arch_s390.h b/urcu/arch_s390.h
index 8a33e203ecb8cee76e5f6c1a1e5b662a5395837a..8d1483abb4aff07c4b129c3e758887af58468ee5 100644
--- a/urcu/arch_s390.h
+++ b/urcu/arch_s390.h
@@ -35,7 +35,7 @@
 extern "C" {
 #endif 
 
-#define CACHE_LINE_SIZE        128
+#define CAA_CACHE_LINE_SIZE    128
 
 #define cmm_mb()    __asm__ __volatile__("bcr 15,0" : : : "memory")
 
diff --git a/urcu/arch_sparc64.h b/urcu/arch_sparc64.h
index 39f27c7adb054b826d9e21b61712f1d817fcd215..0752c4d7a04cac6041cac1be52ad4316b526897e 100644
--- a/urcu/arch_sparc64.h
+++ b/urcu/arch_sparc64.h
@@ -29,7 +29,7 @@
 extern "C" {
 #endif 
 
-#define CACHE_LINE_SIZE        256
+#define CAA_CACHE_LINE_SIZE    256
 
 /*
  * Inspired from the Linux kernel. Workaround Spitfire bug #51.
diff --git a/urcu/arch_x86.h b/urcu/arch_x86.h
index d0a58e80caa5abfc980a58ad51407f82b0ceeee6..20db5cfff98466c528800b37f986eaf3efaad689 100644
--- a/urcu/arch_x86.h
+++ b/urcu/arch_x86.h
@@ -29,7 +29,7 @@
 extern "C" {
 #endif 
 
-#define CACHE_LINE_SIZE        128
+#define CAA_CACHE_LINE_SIZE    128
 
 #ifdef CONFIG_RCU_HAVE_FENCE
 #define cmm_mb()    asm volatile("mfence":::"memory")
@@ -45,7 +45,7 @@ extern "C" {
 #define cmm_wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
 #endif
 
-#define cpu_relax()    asm volatile("rep; nop" : : : "memory");
+#define caa_cpu_relax()        asm volatile("rep; nop" : : : "memory");
 
 #define rdtscll(val)                                                     \
        do {                                                              \
@@ -57,7 +57,7 @@ extern "C" {
 
 typedef unsigned long long cycles_t;
 
-static inline cycles_t get_cycles(void)
+static inline cycles_t caa_get_cycles(void)
 {
         cycles_t ret = 0;
 
diff --git a/urcu/compiler.h b/urcu/compiler.h
index d67e6c5d36922661f7b9e116d9c2b82ea555f6ec..ca32debce01d47a49614bd281ab495355949f4aa 100644
--- a/urcu/compiler.h
+++ b/urcu/compiler.h
 /*
  * Instruct the compiler to perform only a single access to a variable
  * (prohibits merging and refetching). The compiler is also forbidden to reorder
- * successive instances of ACCESS_ONCE(), but only when the compiler is aware of
+ * successive instances of CAA_ACCESS_ONCE(), but only when the compiler is aware of
  * particular ordering. Compiler ordering can be ensured, for example, by
- * putting two ACCESS_ONCE() in separate C statements.
+ * putting two CAA_ACCESS_ONCE() in separate C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
  * merging, or refetching absolutely anything at any time.  Its main intended
  * use is to mediate communication between process-level code and irq/NMI
  * handlers, all running on the same CPU.
  */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define CAA_ACCESS_ONCE(x)     (*(volatile typeof(x) *)&(x))
 
 #ifndef max
 #define max(a,b) ((a)>(b)?(a):(b))
 #endif
 
 #if defined(__SIZEOF_LONG__)
-#define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
+#define CAA_BITS_PER_LONG      (__SIZEOF_LONG__ * 8)
 #elif defined(_LP64)
-#define BITS_PER_LONG  64
+#define CAA_BITS_PER_LONG      64
 #else
-#define BITS_PER_LONG  32
+#define CAA_BITS_PER_LONG      32
 #endif
 
-#define container_of(ptr, type, member)                                        \
+#define caa_container_of(ptr, type, member)                                    \
        ({                                                              \
                const typeof(((type *)NULL)->member) * __ptr = (ptr);   \
                (type *)((char *)__ptr - offsetof(type, member));       \
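
As the comment above stresses, CAA_ACCESS_ONCE() constrains only the
compiler, never the CPU. A sketch of its typical use in a busy-wait
(hypothetical caller, assuming urcu/compiler.h and urcu/arch.h as
renamed in this patch):

#include <urcu/arch.h>		/* caa_cpu_relax() */
#include <urcu/compiler.h>	/* CAA_ACCESS_ONCE() */

static int stop;

static void wait_for_stop(void)
{
	/*
	 * One volatile access per iteration: the compiler may neither
	 * merge the loads nor hoist one out of the loop. CPU-level
	 * ordering is deliberately left to the cmm_*mb() primitives.
	 */
	while (!CAA_ACCESS_ONCE(stop))
		caa_cpu_relax();
}
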
diff --git a/urcu/system.h b/urcu/system.h
index 11a499ec96629488d4a878fa9c6e4733600212c5..e0186066db0975930f4d778c1e45de46246deb5e 100644
--- a/urcu/system.h
+++ b/urcu/system.h
 /*
  * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come before the load.
  */
-#define _LOAD_SHARED(p)               ACCESS_ONCE(p)
+#define _CAA_LOAD_SHARED(p)           CAA_ACCESS_ONCE(p)
 
 /*
  * Load a data from shared memory, doing a cache flush if required.
  */
-#define LOAD_SHARED(p)                 \
+#define CAA_LOAD_SHARED(p)                     \
        ({                              \
                cmm_smp_rmc();          \
-               _LOAD_SHARED(p);        \
+               _CAA_LOAD_SHARED(p);    \
        })
 
 /*
  * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should follow the store.
  */
-#define _STORE_SHARED(x, v)    ({ ACCESS_ONCE(x) = (v); })
+#define _CAA_STORE_SHARED(x, v)        ({ CAA_ACCESS_ONCE(x) = (v); })
 
 /*
  * Store v into x, where x is located in shared memory. Performs the required
  * cache flush after writing. Returns v.
  */
-#define STORE_SHARED(x, v)             \
+#define CAA_STORE_SHARED(x, v)         \
        ({                              \
-               typeof(x) _v = _STORE_SHARED(x, v);     \
+               typeof(x) _v = _CAA_STORE_SHARED(x, v); \
                cmm_smp_wmc();          \
                _v;                     \
        })
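
A sketch of the intended pairing, on hypothetical caller code: the
writer's CAA_STORE_SHARED() issues cmm_smp_wmc() after the volatile
store, matching the cmm_smp_rmc() the reader's CAA_LOAD_SHARED() issues
before its load.

#include <urcu/system.h>

static int ready;

static void announce(void)
{
	CAA_STORE_SHARED(ready, 1);	/* was: STORE_SHARED(ready, 1) */
}

static int poll_ready(void)
{
	return CAA_LOAD_SHARED(ready);	/* was: LOAD_SHARED(ready) */
}
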
diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index 043e616d5e0e94abac9689959c0b2416c8ee4a61..4e09afd7195aaa835a1478df76a7e9b0e508ff16 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -39,7 +39,7 @@ struct __uatomic_dummy {
 };
 #define __hp(x)        ((struct __uatomic_dummy *)(x))
 
-#define _uatomic_set(addr, v)  STORE_SHARED(*(addr), (v))
+#define _uatomic_set(addr, v)  CAA_STORE_SHARED(*(addr), (v))
 
 /* cmpxchg */
 
diff --git a/urcu/uatomic_generic.h b/urcu/uatomic_generic.h
index f65b398988143a126a664635ad3bbcf8ce1fefeb..6b4ef9e8ddb8e0ab1eabca7e1541e421ba87adbe 100644
--- a/urcu/uatomic_generic.h
+++ b/urcu/uatomic_generic.h
@@ -29,11 +29,11 @@ extern "C" {
 #endif
 
 #ifndef uatomic_set
-#define uatomic_set(addr, v)   STORE_SHARED(*(addr), (v))
+#define uatomic_set(addr, v)   CAA_STORE_SHARED(*(addr), (v))
 #endif
 
 #ifndef uatomic_read
-#define uatomic_read(addr)     LOAD_SHARED(*(addr))
+#define uatomic_read(addr)     CAA_LOAD_SHARED(*(addr))
 #endif
 
 #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
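
With these fallbacks, the public uatomic accessors now expand to the
CAA_ shared-access macros. A hypothetical caller (assuming the
configure-selected uatomic header of this era, urcu/uatomic_arch.h):

#include <urcu/uatomic_arch.h>

static unsigned long counter;

static void counter_set(unsigned long v)
{
	uatomic_set(&counter, v);	/* expands to CAA_STORE_SHARED() */
}

static unsigned long counter_get(void)
{
	return uatomic_read(&counter);	/* expands to CAA_LOAD_SHARED() */
}
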
diff --git a/urcu/wfqueue-static.h b/urcu/wfqueue-static.h
index 0f7e68f7a40fea14b208708ae9bd7425b79d35b2..d0db3fc616dc00f21e43b8c1dbd4e376f5f8ff5e 100644
--- a/urcu/wfqueue-static.h
+++ b/urcu/wfqueue-static.h
@@ -79,7 +79,7 @@ void _wfq_enqueue(struct wfq_queue *q, struct wfq_node *node)
         * that the queue is being appended to. The following store will append
         * "node" to the queue from a dequeuer perspective.
         */
-       STORE_SHARED(*old_tail, node);
+       CAA_STORE_SHARED(*old_tail, node);
 }
 
 /*
@@ -99,19 +99,19 @@ ___wfq_dequeue_blocking(struct wfq_queue *q)
        /*
         * Queue is empty if it only contains the dummy node.
         */
-       if (q->head == &q->dummy && LOAD_SHARED(q->tail) == &q->dummy.next)
+       if (q->head == &q->dummy && CAA_LOAD_SHARED(q->tail) == &q->dummy.next)
                return NULL;
        node = q->head;
 
        /*
         * Adaptative busy-looping waiting for enqueuer to complete enqueue.
         */
-       while ((next = LOAD_SHARED(node->next)) == NULL) {
+       while ((next = CAA_LOAD_SHARED(node->next)) == NULL) {
                if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
                        poll(NULL, 0, WFQ_WAIT);        /* Wait for 10ms */
                        attempt = 0;
                } else
-                       cpu_relax();
+                       caa_cpu_relax();
        }
        /*
         * Move queue head forward.
diff --git a/urcu/wfstack-static.h b/urcu/wfstack-static.h
index 3f44743461ab1317c069829132e399fbc88d48eb..0acb4f0a3894cf77f0efb6795c40a11356f7ce52 100644
--- a/urcu/wfstack-static.h
+++ b/urcu/wfstack-static.h
@@ -67,7 +67,7 @@ void _wfs_push(struct wfs_stack *s, struct wfs_node *node)
         * At this point, dequeuers see a NULL node->next, they should busy-wait
         * until node->next is set to old_head.
         */
-       STORE_SHARED(node->next, old_head);
+       CAA_STORE_SHARED(node->next, old_head);
 }
 
 /*
@@ -80,18 +80,18 @@ ___wfs_pop_blocking(struct wfs_stack *s)
        int attempt = 0;
 
 retry:
-       head = LOAD_SHARED(s->head);
+       head = CAA_LOAD_SHARED(s->head);
        if (head == WF_STACK_END)
                return NULL;
        /*
         * Adaptative busy-looping waiting for push to complete.
         */
-       while ((next = LOAD_SHARED(head->next)) == NULL) {
+       while ((next = CAA_LOAD_SHARED(head->next)) == NULL) {
                if (++attempt >= WFS_ADAPT_ATTEMPTS) {
                        poll(NULL, 0, WFS_WAIT);        /* Wait for 10ms */
                        attempt = 0;
                } else
-                       cpu_relax();
+                       caa_cpu_relax();
        }
        if (uatomic_cmpxchg(&s->head, head, next) == head)
                return head;