/*
* memory barriers to serialize with the previous uaddr modification.
*/
- smp_mb();
+ cmm_smp_mb();
ret = pthread_mutex_lock(&compat_futex_lock);
assert(!ret);
/*
* Ensure previous memory operations on uaddr have completed.
*/
- smp_mb();
+ cmm_smp_mb();
switch (op) {
case FUTEX_WAIT:
#define atomic_dec_return(v) (atomic_sub_return(1,v))
/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() cmm_barrier()
+#define smp_mb__after_atomic_dec() cmm_barrier()
+#define smp_mb__before_atomic_inc() cmm_barrier()
+#define smp_mb__after_atomic_inc() cmm_barrier()
#endif //0 /* duplicate with arch_atomic.h */
#define atomic_dec_return(v) (atomic_sub_return(1,v))
/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
+#define smp_mb__before_atomic_dec() cmm_smp_mb()
+#define smp_mb__after_atomic_dec() cmm_smp_mb()
+#define smp_mb__before_atomic_inc() cmm_smp_mb()
+#define smp_mb__after_atomic_inc() cmm_smp_mb()
#endif //0 /* duplicate with arch_atomic.h */
: : "r" (mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() cmm_barrier()
+#define smp_mb__after_atomic_dec() cmm_barrier()
+#define smp_mb__before_atomic_inc() cmm_barrier()
+#define smp_mb__after_atomic_inc() cmm_barrier()
#endif //0
int t;
int duration = 1;
- smp_mb();
+ cmm_smp_mb();
while (uatomic_read(&nthreadsrunning) < nthreads)
poll(NULL, 0, 1);
goflag = GOFLAG_RUN;
- smp_mb();
+ cmm_smp_mb();
sleep(duration);
- smp_mb();
+ cmm_smp_mb();
goflag = GOFLAG_STOP;
- smp_mb();
+ cmm_smp_mb();
wait_all_threads();
for_each_thread(t) {
n_reads += per_thread(n_reads_pt, t);
i = 0;
p = &rcu_stress_array[i];
p->mbtest = 0;
- smp_mb();
+ cmm_smp_mb();
p->pipe_count = 0;
p->mbtest = 1;
rcu_assign_pointer(rcu_stress_current, p);
create_thread(rcu_update_stress_test, NULL);
for (i = 0; i < 5; i++)
create_thread(rcu_fake_update_stress_test, NULL);
- smp_mb();
+ cmm_smp_mb();
goflag = GOFLAG_RUN;
- smp_mb();
+ cmm_smp_mb();
sleep(10);
- smp_mb();
+ cmm_smp_mb();
goflag = GOFLAG_STOP;
- smp_mb();
+ cmm_smp_mb();
wait_all_threads();
for_each_thread(t)
n_reads += per_thread(n_reads_pt, t);
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
pthread_mutex_lock(&lock);
show_usage(argc, argv);
return -1;
}
- smp_mb();
+ cmm_smp_mb();
err = sscanf(argv[1], "%u", &nr_readers);
if (err != 1) {
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
for (tidx = 0; tidx < nr_readers; tidx++) {
show_usage(argc, argv);
return -1;
}
- smp_mb();
+ cmm_smp_mb();
err = sscanf(argv[1], "%u", &nr_readers);
if (err != 1) {
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
new = test_array_alloc();
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
_rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
#ifndef TEST_LOCAL_GC
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
pthread_rwlock_wrlock(&lock);
show_usage(argc, argv);
return -1;
}
- smp_mb();
+ cmm_smp_mb();
err = sscanf(argv[1], "%u", &nr_readers);
if (err != 1) {
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
new = test_array_alloc();
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
new = test_array_alloc();
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
new = test_array_alloc();
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
new = malloc(sizeof(*new));
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
#ifndef TEST_LOCAL_GC
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct rcu_lfq_node *node = malloc(sizeof(*node));
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct rcu_lfq_node *node = rcu_lfq_dequeue(&q,
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct rcu_lfs_node *node = malloc(sizeof(*node));
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct rcu_lfs_node *node = rcu_lfs_pop(&s);
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct wfq_node *node = malloc(sizeof(*node));
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct wfq_node *node = wfq_dequeue_blocking(&q);
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct wfs_node *node = malloc(sizeof(*node));
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
struct wfs_node *node = wfs_pop_blocking(&s);
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
if (unlikely(!rcu_reader))
rcu_bp_register();
- barrier(); /* Ensure the compiler does not reorder us with mutex */
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
tmp = rcu_reader->ctr;
/*
* rcu_gp_ctr is
* Set active readers count for outermost nesting level before
* accessing the pointer.
*/
- smp_mb();
+ cmm_smp_mb();
} else {
_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
}
/*
* Finish using rcu before decrementing the pointer.
*/
- smp_mb();
+ cmm_smp_mb();
_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
- barrier(); /* Ensure the compiler does not reorder us with mutex */
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
#ifdef __cplusplus
exit(-1);
}
if (rcu_reader.need_mb) {
- smp_mb();
+ cmm_smp_mb();
rcu_reader.need_mb = 0;
- smp_mb();
+ cmm_smp_mb();
}
poll(NULL,0,10);
}
*/
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for each thread rcu_reader.ctr count to become 0.
/* All threads should read qparity before accessing data structure
* where new ptr points to. */
/* Write new ptr before changing the qparity */
- smp_mb();
+ cmm_smp_mb();
/* Remove old registry elements */
rcu_gc_registry();
update_counter_and_wait(); /* 0 -> 1, wait readers in parity 0 */
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
* Finish waiting for reader threads before letting the old ptr being
* freed.
*/
- smp_mb();
+ cmm_smp_mb();
out:
mutex_unlock(&rcu_gp_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
static void wait_defer(void)
{
uatomic_dec(&defer_thread_futex);
- smp_mb(); /* Write futex before read queue */
+ cmm_smp_mb(); /* Write futex before read queue */
if (rcu_defer_num_callbacks()) {
- smp_mb(); /* Read queue before write futex */
+ cmm_smp_mb(); /* Read queue before write futex */
/* Callbacks are queued, don't wait. */
uatomic_set(&defer_thread_futex, 0);
} else {
- smp_rmb(); /* Read queue before read futex */
+ cmm_smp_rmb(); /* Read queue before read futex */
if (uatomic_read(&defer_thread_futex) == -1)
futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
*/
for (i = queue->tail; i != head;) {
- smp_rmb(); /* read head before q[]. */
+ cmm_smp_rmb(); /* read head before q[]. */
p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
if (unlikely(DQ_IS_FCT_BIT(p))) {
DQ_CLEAR_FCT_BIT(p);
fct = queue->last_fct_out;
fct(p);
}
- smp_mb(); /* push tail after having used q[] */
+ cmm_smp_mb(); /* push tail after having used q[] */
STORE_SHARED(queue->tail, i);
}
}
}
_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
- smp_wmb(); /* Publish new pointer before head */
+ cmm_smp_wmb(); /* Publish new pointer before head */
/* Write q[] before head. */
STORE_SHARED(defer_queue.head, head);
- smp_mb(); /* Write queue head before read futex */
+ cmm_smp_mb(); /* Write queue head before read futex */
/*
* Wake-up any waiting defer thread.
*/
#define _rcu_dereference(p) ({ \
typeof(p) _________p1 = LOAD_SHARED(p); \
- smp_read_barrier_depends(); \
+ cmm_smp_read_barrier_depends(); \
(_________p1); \
})
typeof(*p) _________pnew = (_new); \
if (!__builtin_constant_p(_new) || \
((_new) != NULL)) \
- wmb(); \
+ cmm_wmb(); \
uatomic_cmpxchg(p, _________pold, _________pnew); \
})
typeof(*p) _________pv = (v); \
if (!__builtin_constant_p(v) || \
((v) != NULL)) \
- wmb(); \
+ cmm_wmb(); \
uatomic_xchg(p, _________pv); \
})
typeof(*p) _________pv = (v); \
if (!__builtin_constant_p(v) || \
((v) != NULL)) \
- wmb(); \
+ cmm_wmb(); \
uatomic_set(p, _________pv); \
})
void *rcu_set_pointer_sym(void **p, void *v)
{
- wmb();
+ cmm_wmb();
return uatomic_set(p, v);
}
void *rcu_xchg_pointer_sym(void **p, void *v)
{
- wmb();
+ cmm_wmb();
return uatomic_xchg(p, v);
}
void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
- wmb();
+ cmm_wmb();
return uatomic_cmpxchg(p, old, _new);
}
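
To see the pairing these helpers rely on: the cmm_wmb() above orders the initialization of the pointed-to object before the pointer store, and the cmm_smp_read_barrier_depends() in _rcu_dereference() orders the pointer load before the dependent loads (a no-op everywhere but Alpha). A minimal self-contained sketch of that publish/consume pattern, not part of the patch; the gp_ptr/struct node names and the illus_* stand-in macros are illustrative assumptions rather than liburcu API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins so the sketch compiles on its own; real code would use
 * cmm_wmb() and cmm_smp_read_barrier_depends() from urcu/arch.h. */
#define illus_wmb()			__sync_synchronize()
#define illus_read_barrier_depends()	__sync_synchronize()
#define ILLUS_ACCESS_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct node {
	int a, b;
};

static struct node *gp_ptr;	/* RCU-style shared pointer (illustrative) */

/* Publish side: fully initialize the node, then order those stores
 * before the pointer store, as rcu_set_pointer() does. */
static void publish(void)
{
	struct node *p = malloc(sizeof(*p));

	p->a = 1;
	p->b = 2;
	illus_wmb();			/* writes to *p before publication */
	ILLUS_ACCESS_ONCE(gp_ptr) = p;
}

/* Consume side: load the pointer, then order that load before the
 * dependent loads, as _rcu_dereference() does. */
static void *reader(void *arg)
{
	struct node *p;

	while (!(p = ILLUS_ACCESS_ONCE(gp_ptr)))
		;			/* wait for publication */
	illus_read_barrier_depends();
	printf("%d %d\n", p->a, p->b);	/* guaranteed to print "1 2" */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader, NULL);
	publish();
	pthread_join(tid, NULL);
	return 0;
}
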
static inline void _rcu_quiescent_state(void)
{
- smp_mb();
+ cmm_smp_mb();
_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
- smp_mb(); /* write rcu_reader.ctr before read futex */
+ cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
wake_up_gp();
- smp_mb();
+ cmm_smp_mb();
}
static inline void _rcu_thread_offline(void)
{
- smp_mb();
+ cmm_smp_mb();
STORE_SHARED(rcu_reader.ctr, 0);
- smp_mb(); /* write rcu_reader.ctr before read futex */
+ cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
wake_up_gp();
- barrier(); /* Ensure the compiler does not reorder us with mutex */
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
static inline void _rcu_thread_online(void)
{
- barrier(); /* Ensure the compiler does not reorder us with mutex */
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ cmm_smp_mb();
}
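
The three inline functions above are what keep the QSBR flavour's read side free of barriers: the cost is paid only when a thread announces a quiescent state or goes offline. A short sketch of the expected calling pattern, assuming the public urcu-qsbr API (urcu-qsbr.h, rcu_register_thread(), rcu_quiescent_state(), rcu_thread_offline()/rcu_thread_online()); it is an illustration, not part of the patch.

#include <poll.h>
#include <pthread.h>
#include <urcu-qsbr.h>		/* QSBR flavour public header (assumed) */

static void *qsbr_reader(void *arg)
{
	int i;

	rcu_register_thread();		/* required before any read side */
	for (i = 0; i < 1000; i++) {
		rcu_read_lock();	/* compiles to nothing in QSBR */
		/* ... dereference RCU-protected data here ... */
		rcu_read_unlock();

		/* Periodically announce a quiescent state so that
		 * synchronize_rcu() on the write side can make progress. */
		rcu_quiescent_state();

		/* Before blocking for a long time, go offline so grace
		 * periods do not wait for this thread at all. */
		rcu_thread_offline();
		poll(NULL, 0, 10);
		rcu_thread_online();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, qsbr_reader, NULL);
	pthread_join(tid, NULL);
	return 0;
}
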
#ifdef __cplusplus
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- smp_rmb();
+ cmm_smp_rmb();
if (uatomic_read(&gp_futex) == -1)
futex_noasync(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
* while new readers are always accessing data (no progress). Enforce
* compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
*/
- barrier();
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for each thread rcu_reader_qs_gp count to become 0.
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- smp_mb();
+ cmm_smp_mb();
}
list_for_each_entry_safe(index, tmp, &registry, node) {
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- smp_mb();
+ cmm_smp_mb();
uatomic_set(&gp_futex, 0);
}
break;
#ifndef HAS_INCOHERENT_CACHES
cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
- smp_mb();
+ cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
}
* where new ptr points to.
*/
/* Write new ptr before changing the qparity */
- smp_mb();
+ cmm_smp_mb();
/*
* Mark the writer thread offline to make sure we don't wait for
* accessing data (no progress). Enforce compiler-order of load
* rcu_reader ctr before store to rcu_gp_ctr.
*/
- barrier();
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
*/
if (was_online)
_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ cmm_smp_mb();
}
#else /* !(BITS_PER_LONG < 64) */
void synchronize_rcu(void)
* our own quiescent state. This allows using synchronize_rcu() in
* threads registered as readers.
*/
- smp_mb();
+ cmm_smp_mb();
if (was_online)
STORE_SHARED(rcu_reader.ctr, 0);
if (was_online)
_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
- smp_mb();
+ cmm_smp_mb();
}
#endif /* !(BITS_PER_LONG < 64) */
static inline void smp_mb_slave(int group)
{
if (likely(has_sys_membarrier))
- barrier();
+ cmm_barrier();
else
- smp_mb();
+ cmm_smp_mb();
}
#endif
#ifdef RCU_MB
static inline void smp_mb_slave(int group)
{
- smp_mb();
+ cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
static inline void smp_mb_slave(int group)
{
- barrier();
+ cmm_barrier();
}
#endif
{
unsigned long tmp;
- barrier(); /* Ensure the compiler does not reorder us with mutex */
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
tmp = rcu_reader.ctr;
/*
* rcu_gp_ctr is
} else {
_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
}
- barrier(); /* Ensure the compiler does not reorder us with mutex */
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
#ifdef __cplusplus
exit(-1);
}
if (LOAD_SHARED(rcu_reader.need_mb)) {
- smp_mb();
+ cmm_smp_mb();
_STORE_SHARED(rcu_reader.need_mb, 0);
- smp_mb();
+ cmm_smp_mb();
}
poll(NULL,0,10);
}
if (likely(has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
else
- smp_mb();
+ cmm_smp_mb();
}
#endif
#ifdef RCU_MB
static void smp_mb_master(int group)
{
- smp_mb();
+ cmm_smp_mb();
}
#endif
struct rcu_reader *index;
/*
- * Ask for each threads to execute a smp_mb() so we can consider the
+ * Ask each thread to execute a cmm_smp_mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
if (list_empty(&registry))
return;
/*
- * pthread_kill has a smp_mb(). But beware, we assume it performs
+ * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
* a cache flush on architectures with non-coherent cache. Let's play
- * safe and don't assume anything : we use smp_mc() to make sure the
+ * safe and not assume anything: we use cmm_smp_mc() to make sure the
* cache flush is enforced.
*/
list_for_each_entry(index, &registry, node) {
poll(NULL, 0, 1);
}
}
- smp_mb(); /* read ->need_mb before ending the barrier */
+ cmm_smp_mb(); /* read ->need_mb before ending the barrier */
}
static void smp_mb_master(int group)
* while new readers are always accessing data (no progress). Enforce
* compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
*/
- barrier();
+ cmm_barrier();
/*
*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for each thread rcu_reader.ctr count to become 0.
* accessing data (no progress). Enforce compiler-order of load
* rcu_reader ctr before store to rcu_gp_ctr.
*/
- barrier();
+ cmm_barrier();
/*
- * Adding a smp_mb() which is _not_ formally required, but makes the
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
* model easier to understand. It does not have a big performance impact
* anyway, given this is the write-side.
*/
- smp_mb();
+ cmm_smp_mb();
/*
* Wait for previous parity to be empty of readers.
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
/*
- * Executing this smp_mb() is the only purpose of this signal handler.
- * It punctually promotes barrier() into smp_mb() on every thread it is
+ * Executing this cmm_smp_mb() is the only purpose of this signal handler.
+ * At that point, it promotes cmm_barrier() into cmm_smp_mb() on every thread it is
* executed on.
*/
- smp_mb();
+ cmm_smp_mb();
_STORE_SHARED(rcu_reader.need_mb, 0);
- smp_mb();
+ cmm_smp_mb();
}
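
The RCU_SIGNAL scheme relies on the writer being able to promote the readers' compiler barriers into real memory barriers on demand, which is exactly what the handler above does. A self-contained sketch of that signal/acknowledge handshake, not part of the patch; the need_mb, promote_barrier() and illus_mb() names are illustrative, and the real force_mb_all_readers() loops over the whole reader registry rather than a single thread.

#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

#define illus_mb()	__sync_synchronize()	/* stand-in for cmm_smp_mb() */

static volatile sig_atomic_t need_mb;	/* per-reader flag in the real code */
static volatile int stop;

static void promote_handler(int signo)
{
	/* Executing a full barrier is the only purpose of the handler. */
	illus_mb();
	need_mb = 0;			/* acknowledge to the writer */
	illus_mb();
}

static void *reader(void *arg)
{
	while (!stop)
		poll(NULL, 0, 1);	/* read side uses only compiler barriers */
	return NULL;
}

/* Writer side: ask one reader thread to execute a memory barrier and
 * wait for the acknowledgement, as smp_mb_master() does per thread. */
static void promote_barrier(pthread_t tid)
{
	need_mb = 1;
	illus_mb();			/* flag store before the signal */
	pthread_kill(tid, SIGUSR1);
	while (need_mb)
		poll(NULL, 0, 1);
	illus_mb();			/* acknowledgement before proceeding */
}

int main(void)
{
	struct sigaction sa;
	pthread_t tid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = promote_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	pthread_create(&tid, NULL, reader, NULL);
	promote_barrier(tid);
	stop = 1;
	pthread_join(tid, NULL);
	printf("barrier promoted on the reader thread\n");
	return 0;
}
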
/*
extern "C" {
#endif
-#define mb() asm volatile("mb":::"memory")
-#define wmb() asm volatile("wmb":::"memory")
-#define read_barrier_depends() asm volatile("mb":::"memory")
+#define cmm_mb() asm volatile("mb":::"memory")
+#define cmm_wmb() asm volatile("wmb":::"memory")
+#define cmm_read_barrier_depends() asm volatile("mb":::"memory")
typedef unsigned long long cycles_t;
extern "C" {
#endif
-#define mb() asm volatile("dmb":::"memory")
+#define cmm_mb() asm volatile("dmb":::"memory")
#include <stdlib.h>
#include <sys/time.h>
#define CACHE_LINE_SIZE 64
#endif
-#if !defined(mc) && !defined(rmc) && !defined(wmc)
+#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
#define CONFIG_HAVE_MEM_COHERENCY
/*
- * Architectures with cache coherency must _not_ define mc/rmc/wmc.
+ * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
*
- * For them, mc/rmc/wmc are implemented with a simple compiler barrier;
- * in addition, we provide defaults for mb (using GCC builtins) as well as
- * rmb and wmb (defaulting to mb).
+ * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple compiler barrier;
+ * in addition, we provide defaults for cmm_mb (using GCC builtins) as well as
+ * cmm_rmb and cmm_wmb (defaulting to cmm_mb).
*/
-#ifndef mb
-#define mb() __sync_synchronize()
+#ifndef cmm_mb
+#define cmm_mb() __sync_synchronize()
#endif
-#ifndef rmb
-#define rmb() mb()
+#ifndef cmm_rmb
+#define cmm_rmb() cmm_mb()
#endif
-#ifndef wmb
-#define wmb() mb()
+#ifndef cmm_wmb
+#define cmm_wmb() cmm_mb()
#endif
-#define mc() barrier()
-#define rmc() barrier()
-#define wmc() barrier()
+#define cmm_mc() cmm_barrier()
+#define cmm_rmc() cmm_barrier()
+#define cmm_wmc() cmm_barrier()
#else
/*
* Architectures without cache coherency need something like the following:
*
- * #define mc() arch_cache_flush()
- * #define rmc() arch_cache_flush_read()
- * #define wmc() arch_cache_flush_write()
+ * #define cmm_mc() arch_cache_flush()
+ * #define cmm_rmc() arch_cache_flush_read()
+ * #define cmm_wmc() arch_cache_flush_write()
*
- * Of these, only mc is mandatory. rmc and wmc default to mc. mb/rmb/wmb
- * use these definitions by default:
+ * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to cmm_mc.
+ * cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
*
- * #define mb() mc()
- * #define rmb() rmc()
- * #define wmb() wmc()
+ * #define cmm_mb() cmm_mc()
+ * #define cmm_rmb() cmm_rmc()
+ * #define cmm_wmb() cmm_wmc()
*/
-#ifndef mb
-#define mb() mc()
+#ifndef cmm_mb
+#define cmm_mb() cmm_mc()
#endif
-#ifndef rmb
-#define rmb() rmc()
+#ifndef cmm_rmb
+#define cmm_rmb() cmm_rmc()
#endif
-#ifndef wmb
-#define wmb() wmc()
+#ifndef cmm_wmb
+#define cmm_wmb() cmm_wmc()
#endif
-#ifndef rmc
-#define rmc() mc()
+#ifndef cmm_rmc
+#define cmm_rmc() cmm_mc()
#endif
-#ifndef wmc
-#define wmc() mc()
+#ifndef cmm_wmc
+#define cmm_wmc() cmm_mc()
#endif
#endif
/* Nop everywhere except on alpha. */
-#ifndef read_barrier_depends
-#define read_barrier_depends()
+#ifndef cmm_read_barrier_depends
+#define cmm_read_barrier_depends()
#endif
#ifdef CONFIG_RCU_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc() mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-#define smp_read_barrier_depends() read_barrier_depends()
+#define cmm_smp_mb() cmm_mb()
+#define cmm_smp_rmb() cmm_rmb()
+#define cmm_smp_wmb() cmm_wmb()
+#define cmm_smp_mc() cmm_mc()
+#define cmm_smp_rmc() cmm_rmc()
+#define cmm_smp_wmc() cmm_wmc()
+#define cmm_smp_read_barrier_depends() cmm_read_barrier_depends()
#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_mc() barrier()
-#define smp_rmc() barrier()
-#define smp_wmc() barrier()
-#define smp_read_barrier_depends()
+#define cmm_smp_mb() cmm_barrier()
+#define cmm_smp_rmb() cmm_barrier()
+#define cmm_smp_wmb() cmm_barrier()
+#define cmm_smp_mc() cmm_barrier()
+#define cmm_smp_rmc() cmm_barrier()
+#define cmm_smp_wmc() cmm_barrier()
+#define cmm_smp_read_barrier_depends()
#endif
#ifndef cpu_relax
-#define cpu_relax() barrier()
+#define cpu_relax() cmm_barrier()
#endif
#ifdef __cplusplus
/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CACHE_LINE_SIZE 256
-#define mb() asm volatile("sync":::"memory")
+#define cmm_mb() asm volatile("sync":::"memory")
#define mftbl() \
({ \
for (;;) {
h = mftbu();
- barrier();
+ cmm_barrier();
l = mftbl();
- barrier();
+ cmm_barrier();
if (mftbu() == h)
return (((cycles_t) h) << 32) + l;
}
#define CACHE_LINE_SIZE 128
-#define mb() __asm__ __volatile__("bcr 15,0" : : : "memory")
+#define cmm_mb() __asm__ __volatile__("bcr 15,0" : : : "memory")
typedef unsigned long long cycles_t;
"1:\n" \
: : : "memory")
-#define mb() membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb() membar_safe("#LoadLoad")
-#define wmb() membar_safe("#StoreStore")
+#define cmm_mb() membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+#define cmm_rmb() membar_safe("#LoadLoad")
+#define cmm_wmb() membar_safe("#StoreStore")
typedef unsigned long long cycles_t;
#define CACHE_LINE_SIZE 128
#ifdef CONFIG_RCU_HAVE_FENCE
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence"::: "memory")
+#define cmm_mb() asm volatile("mfence":::"memory")
+#define cmm_rmb() asm volatile("lfence":::"memory")
+#define cmm_wmb() asm volatile("sfence"::: "memory")
#else
/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out of order store. cmm_wmb() ceases to be a
* nop for these.
*/
-#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif
#define cpu_relax() asm volatile("rep; nop" : : : "memory");
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
-#define barrier() asm volatile("" : : : "memory")
+#define cmm_barrier() asm volatile("" : : : "memory")
/*
* Instruct the compiler to perform only a single access to a variable
{
newp->next = head->next;
newp->prev = (struct hlist_node *)head;
- smp_wmb();
+ cmm_smp_wmb();
if (head->next)
head->next->prev = newp;
head->next = newp;
{
newp->next = head->next;
newp->prev = head;
- smp_wmb();
+ cmm_smp_wmb();
head->next->prev = newp;
head->next = newp;
}
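
The cmm_smp_wmb() in the functions above orders the initialization of newp->next and newp->prev before the stores that link the node into the list, so a concurrent reader can never follow a pointer into a half-initialized node. For completeness, a sketch of the matching read side, assuming the liburcu reader API (rcu_read_lock()/rcu_read_unlock(), rcu_dereference()) and an already-registered reader thread; the struct mynode list is illustrative, not the hlist type used above.

#include <urcu.h>	/* assumed to provide rcu_read_lock()/rcu_dereference() */

struct mynode {
	int key;
	struct mynode *next;
};

static struct mynode *list_head;	/* published with cmm_smp_wmb()/rcu_assign_pointer() */

/* Look up a key while writers may concurrently insert at the head.
 * Every pointer fetched from shared memory goes through rcu_dereference(),
 * which pairs with the write-side barrier shown above. */
static int lookup(int key)
{
	struct mynode *p;
	int found = 0;

	rcu_read_lock();
	for (p = rcu_dereference(list_head); p; p = rcu_dereference(p->next)) {
		if (p->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
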
#include <urcu/arch.h>
/*
- * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
+ * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come before the load.
*/
#define _LOAD_SHARED(p) ACCESS_ONCE(p)
*/
#define LOAD_SHARED(p) \
({ \
- smp_rmc(); \
+ cmm_smp_rmc(); \
_LOAD_SHARED(p); \
})
/*
- * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
+ * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should follow the store.
*/
#define _STORE_SHARED(x, v) ({ ACCESS_ONCE(x) = (v); })
#define STORE_SHARED(x, v) \
({ \
typeof(x) _v = _STORE_SHARED(x, v); \
- smp_wmc(); \
+ cmm_smp_wmc(); \
_v; \
})
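
Note that LOAD_SHARED()/STORE_SHARED() only add the cache-coherency primitives (cmm_smp_rmc()/cmm_smp_wmc()); they do not order independent accesses, so publication still needs an explicit cmm_smp_wmb()/cmm_smp_rmb() pair, as in the deferral-queue code earlier in this patch. A minimal sketch of that division of labour, with illustrative names (go, payload) and self-contained illus_* stand-ins for the real macros.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins so the sketch compiles on its own; after this patch the real
 * primitives are LOAD_SHARED()/STORE_SHARED() (urcu/system.h) and
 * cmm_smp_wmb()/cmm_smp_rmb() (urcu/arch.h). */
#define ILLUS_ACCESS_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define illus_load_shared(p)		ILLUS_ACCESS_ONCE(p)
#define illus_store_shared(x, v)	do { ILLUS_ACCESS_ONCE(x) = (v); } while (0)
#define illus_smp_wmb()			__sync_synchronize()
#define illus_smp_rmb()			__sync_synchronize()

static int payload;
static int go;

static void *consumer(void *arg)
{
	while (!illus_load_shared(go))	/* *_SHARED gives visibility (coherency)... */
		;
	illus_smp_rmb();		/* ...the explicit rmb gives ordering */
	printf("%d\n", payload);	/* prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, consumer, NULL);
	payload = 42;			/* plain store */
	illus_smp_wmb();		/* order payload before the flag */
	illus_store_shared(go, 1);
	pthread_join(tid, NULL);
	return 0;
}
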