liburcu_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
liburcu_mb_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
-liburcu_mb_la_CFLAGS = -DURCU_MB
+liburcu_mb_la_CFLAGS = -DRCU_MB
liburcu_bp_la_SOURCES = urcu-bp.c urcu-pointer.c $(COMPAT)
* Link the application with "-lurcu".
* This is the preferred version of the library, both in terms of speed
and flexibility. Requires a signal, typically SIGUSR1. Can be
- overridden with -DSIGURCU by modifying Makefile.build.inc.
+ overridden with -DSIGRCU by modifying Makefile.build.inc.
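For illustration, a minimal sketch of a reader using this flavor (the shared pointer and thread function are hypothetical; build with the link line above):

#include <pthread.h>
#include <urcu.h>

static int *shared_ptr;	/* hypothetical RCU-protected pointer */

static void *reader_thread(void *arg)
{
	int *p;

	rcu_register_thread();	/* every reader thread must register */
	rcu_read_lock();
	p = rcu_dereference(shared_ptr);	/* snapshot valid until rcu_read_unlock() */
	if (p)
		(void) *p;	/* read through the snapshot */
	rcu_read_unlock();
	rcu_unregister_thread();
	return NULL;
}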
Usage of liburcu-mb
* #include <urcu.h>
- * Compile any _LGPL_SOURCE code using this library with "-DURCU_MB".
+ * Compile any _LGPL_SOURCE code using this library with "-DRCU_MB".
* Link with "-lurcu-mb".
* This version of the urcu library does not need to
- reserve a signal number. URCU_MB uses full memory barriers for
+ reserve a signal number. RCU_MB uses full memory barriers for
readers. This eliminates the need for signals but results in slower
reads.
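The reader code is identical to the signal flavor; only the build changes. A hedged sketch of how an _LGPL_SOURCE compilation unit would select this flavor (flags as documented above, file name illustrative):

/* reader-mb.c -- build: cc -D_LGPL_SOURCE -DRCU_MB -c reader-mb.c
 *                link:  cc reader-mb.o -lurcu-mb
 * Defining RCU_MB before the include has the same effect as -DRCU_MB.
 */
#define RCU_MB
#include <urcu.h>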
* Link with "-lurcu-bp".
* The BP library flavor stands for "bulletproof". It is specifically
  designed to let tracing libraries hook into applications without
- requiring those applications to be modified. urcu_init(),
+ requiring those applications to be modified. rcu_init(),
rcu_register_thread() and rcu_unregister_thread() all become nops.
The state is dealt with by the library internally at the expense of
read-side and write-side performance.
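Because registration is implicit in this flavor, a probe can run from threads the tracing library has never seen; a minimal sketch (the header name and probe function are assumptions):

#include <urcu-bp.h>	/* assumption: header of the bp flavor */

int probe_read(int *p)	/* may be called from an unregistered thread */
{
	int v = 0;

	rcu_read_lock();	/* registers the calling thread on first use */
	if (p)
		v = *p;
	rcu_read_unlock();
	return v;
}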
/*
* It does not really matter if the constructor is called before using
- * the library, as long as the caller checks if __urcu_cas_avail < 0 and calls
+ * the library, as long as the caller checks if __rcu_cas_avail < 0 and calls
* compat_arch_init() explicitly if needed.
*/
-int __attribute__((constructor)) __urcu_cas_init(void);
+int __attribute__((constructor)) __rcu_cas_init(void);
/*
* -1: unknown
* 1: available
* 0: unavailable
*/
-int __urcu_cas_avail = -1;
+int __rcu_cas_avail = -1;
static pthread_mutex_t compat_mutex = PTHREAD_MUTEX_INITIALIZER;
return result;
}
-int __urcu_cas_init(void)
+int __rcu_cas_init(void)
{
- if (__urcu_cas_avail < 0)
- __urcu_cas_avail = compare_and_swap_is_available();
- return __urcu_cas_avail;
+ if (__rcu_cas_avail < 0)
+ __rcu_cas_avail = compare_and_swap_is_available();
+ return __rcu_cas_avail;
}
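A dependent library that wants the probe done early can perform the explicit check described above from its own constructor; a sketch using the symbols shown here (the constructor name is hypothetical):

extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

static void __attribute__((constructor)) mylib_init(void)
{
	if (__rcu_cas_avail < 0)	/* -1: CPU not probed yet */
		(void) __rcu_cas_init();
}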
AC_CONFIG_HEADERS([config.h urcu/config.h])
# Keep at the end so as not to pollute the installed header.
-AH_TEMPLATE([CONFIG_URCU_SMP], [Enable SMP support. With SMP support enabled, uniprocessors are also supported. With SMP support disabled, UP systems work fine, but the behavior of SMP systems is undefined.])
-AH_TEMPLATE([CONFIG_URCU_HAVE_FENCE], [Defined when on a system that has memory fence instructions.])
-AH_TEMPLATE([CONFIG_URCU_HAVE_FUTEX], [Defined when on a system with futex support.])
-AH_TEMPLATE([CONFIG_URCU_COMPAT_ARCH], [Compatibility mode for i386 which lacks
+AH_TEMPLATE([CONFIG_RCU_SMP], [Enable SMP support. With SMP support enabled, uniprocessors are also supported. With SMP support disabled, UP systems work fine, but the behavior of SMP systems is undefined.])
+AH_TEMPLATE([CONFIG_RCU_HAVE_FENCE], [Defined when on a system that has memory fence instructions.])
+AH_TEMPLATE([CONFIG_RCU_HAVE_FUTEX], [Defined when on a system with futex support.])
+AH_TEMPLATE([CONFIG_RCU_COMPAT_ARCH], [Compatibility mode for i386 which lacks
cmpxchg instruction.])
# Checks for programs.
#Only using fence for x86_64.
if test "x$ARCHTYPE" = "xx86" -a "x$host_cpu" != "xi386" -a "x$host_cpu" != "xi486" -a "x$host_cpu" != "xi586" -a "x$host_cpu" != "xi686"; then
]
- AC_DEFINE([CONFIG_URCU_HAVE_FENCE], [1])
+ AC_DEFINE([CONFIG_RCU_HAVE_FENCE], [1])
[
fi
]
],
[
AC_MSG_RESULT([yes])
- AC_DEFINE([CONFIG_URCU_HAVE_FUTEX], [1])
+ AC_DEFINE([CONFIG_RCU_HAVE_FUTEX], [1])
compat_futex_test=0
]
,
[
if test "x$SUBARCHTYPE" = xx86compat; then
]
- AC_DEFINE([CONFIG_URCU_COMPAT_ARCH], [1])
+ AC_DEFINE([CONFIG_RCU_COMPAT_ARCH], [1])
[
fi
]
echo "SMP support disabled."
else
]
- AC_DEFINE([CONFIG_URCU_SMP], [1])
+ AC_DEFINE([CONFIG_RCU_SMP], [1])
[
echo "SMP support enabled."
fi
URCU_SIGNAL=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-pointer.c $(COMPAT)
# URCU_SIGNAL_YIELD uses urcu.c but -DDEBUG_YIELD must be defined
URCU_SIGNAL_YIELD=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-pointer.c $(COMPAT)
-# URCU_MB uses urcu.c but -DURCU_MB must be defined
+# URCU_MB uses urcu.c but -DRCU_MB must be defined
URCU_MB=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-pointer.c $(COMPAT)
URCU_BP=$(top_srcdir)/urcu-bp.c $(top_srcdir)/urcu-pointer.c $(COMPAT)
URCU_QSBR=$(top_srcdir)/urcu-qsbr.c $(top_srcdir)/urcu-pointer.c $(COMPAT)
-# -DURCU_MB must be defined
+# -DRCU_MB must be defined
URCU_MB_DEFER=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-defer.c $(top_srcdir)/urcu-pointer.c $(COMPAT)
URCU_SIGNAL_LIB=$(top_builddir)/liburcu.la
test_urcu_yield_CFLAGS = -DDEBUG_YIELD $(AM_CFLAGS)
test_urcu_mb_SOURCES = test_urcu.c $(URCU_MB)
-test_urcu_mb_CFLAGS = -DURCU_MB $(AM_CFLAGS)
+test_urcu_mb_CFLAGS = -DRCU_MB $(AM_CFLAGS)
test_qsbr_timing_SOURCES = test_qsbr_timing.c $(URCU_QSBR)
test_urcu_gc_SOURCES = test_urcu_gc.c $(URCU_SIGNAL)
test_urcu_gc_mb_SOURCES = test_urcu_gc.c $(URCU_MB)
-test_urcu_gc_mb_CFLAGS = -DURCU_MB $(AM_CFLAGS)
+test_urcu_gc_mb_CFLAGS = -DRCU_MB $(AM_CFLAGS)
test_qsbr_gc_SOURCES = test_qsbr_gc.c $(URCU_QSBR)
test_urcu_lgc_CFLAGS = -DTEST_LOCAL_GC $(AM_CFLAGS)
test_urcu_lgc_mb_SOURCES = test_urcu_gc.c $(URCU_MB)
-test_urcu_lgc_mb_CFLAGS = -DTEST_LOCAL_GC -DURCU_MB $(AM_CFLAGS)
+test_urcu_lgc_mb_CFLAGS = -DTEST_LOCAL_GC -DRCU_MB $(AM_CFLAGS)
test_qsbr_dynamic_link_SOURCES = test_qsbr.c $(URCU_QSBR)
test_qsbr_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
test_urcu_mb_defer_SOURCES = test_urcu_defer.c $(URCU_MB_DEFER)
-test_urcu_mb_defer_CFLAGS = -DURCU_MB $(AM_CFLAGS)
+test_urcu_mb_defer_CFLAGS = -DRCU_MB $(AM_CFLAGS)
test_uatomic_SOURCES = test_uatomic.c $(COMPAT)
#include <urcu.h>
#endif
#ifdef TORTURE_URCU_MB
-#define URCU_MB
+#define RCU_MB
#include <urcu.h>
#endif
#ifdef TORTURE_QSBR
#define YIELD_WRITE (1 << 1)
/*
- * Updates without URCU_MB are much slower. Account this in
+ * Updates without RCU_MB are much slower. Account this in
* the delay.
*/
/* maximum sleep delay, in us */
#endif
/*
- * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
+ * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use a
* full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
*/
#define RCU_GP_COUNT (1UL << 0)
/* Use a number of bits equal to half the architecture's long size */
-#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
-#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+#define RCU_GP_CTR_PHASE (1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
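To make the arithmetic concrete, a worked example of the resulting layout (not code from the tree):

/*
 * sizeof(long) << 2 is half the word size in bits:
 *   32-bit long: RCU_GP_CTR_PHASE == 1UL << 16, RCU_GP_CTR_NEST_MASK == 0xffff
 *   64-bit long: RCU_GP_CTR_PHASE == 1UL << 32, RCU_GP_CTR_NEST_MASK == 0xffffffff
 * The nesting count therefore always fills whole bytes (16 or 32 bits),
 * which is what the "multiple of 8" requirement above guarantees.
 */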
/*
* Used internally by _rcu_read_lock.
* Using an int rather than a char to eliminate false register dependencies
* causing stalls on some architectures.
*/
-extern long urcu_gp_ctr;
+extern long rcu_gp_ctr;
-struct urcu_reader {
+struct rcu_reader {
/* Data used by both reader and synchronize_rcu() */
long ctr;
/* Data used for registry */
* Adds a pointer dereference on the read side, but does not require
* unregistering the reader thread.
*/
-extern struct urcu_reader __thread *urcu_reader;
+extern struct rcu_reader __thread *rcu_reader;
static inline int rcu_old_gp_ongoing(long *value)
{
*/
v = LOAD_SHARED(*value);
return (v & RCU_GP_CTR_NEST_MASK) &&
- ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+ ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
static inline void _rcu_read_lock(void)
long tmp;
/* Check if registered */
- if (unlikely(!urcu_reader))
+ if (unlikely(!rcu_reader))
rcu_bp_register();
- tmp = urcu_reader->ctr;
- /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+ tmp = rcu_reader->ctr;
+ /*
+ * rcu_gp_ctr is
+ * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
+ */
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _STORE_SHARED(urcu_reader->ctr, _LOAD_SHARED(urcu_gp_ctr));
+ _STORE_SHARED(rcu_reader->ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer.
*/
smp_mb();
} else {
- _STORE_SHARED(urcu_reader->ctr, tmp + RCU_GP_COUNT);
+ _STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
}
}
* Finish using rcu before decrementing the pointer.
*/
smp_mb();
- _STORE_SHARED(urcu_reader->ctr, urcu_reader->ctr - RCU_GP_COUNT);
+ _STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
}
#ifdef __cplusplus
#define RCU_SLEEP_DELAY 1000
#define ARENA_INIT_ALLOC 16
-void __attribute__((destructor)) urcu_bp_exit(void);
+void __attribute__((destructor)) rcu_bp_exit(void);
-static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
#ifdef DEBUG_YIELD
unsigned int yield_active;
/*
* Global grace period counter.
- * Contains the current RCU_GP_CTR_BIT.
+ * Contains the current RCU_GP_CTR_PHASE.
* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
-long urcu_gp_ctr = RCU_GP_COUNT;
+long rcu_gp_ctr = RCU_GP_COUNT;
/*
* Pointer to registry elements. Written to only by each individual reader. Read
* by both the reader and the writers.
*/
-struct urcu_reader __thread *urcu_reader;
+struct rcu_reader __thread *rcu_reader;
static LIST_HEAD(registry);
static void rcu_gc_registry(void);
-static void internal_urcu_lock(void)
+static void internal_rcu_lock(void)
{
int ret;
#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(&urcu_mutex);
+ ret = pthread_mutex_lock(&rcu_mutex);
if (ret) {
perror("Error in pthread mutex lock");
exit(-1);
}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+ while ((ret = pthread_mutex_trylock(&rcu_mutex)) != 0) {
if (ret != EBUSY && ret != EINTR) {
printf("ret = %d, errno = %d\n", ret, errno);
perror("Error in pthread mutex lock");
exit(-1);
}
- if (urcu_reader.need_mb) {
+ if (rcu_reader.need_mb) {
smp_mb();
- urcu_reader.need_mb = 0;
+ rcu_reader.need_mb = 0;
smp_mb();
}
poll(NULL, 0, 10);
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-static void internal_urcu_unlock(void)
+static void internal_rcu_unlock(void)
{
int ret;
- ret = pthread_mutex_unlock(&urcu_mutex);
+ ret = pthread_mutex_unlock(&rcu_mutex);
if (ret) {
perror("Error in pthread mutex unlock");
exit(-1);
}
/*
- * called with urcu_mutex held.
+ * called with rcu_mutex held.
*/
-static void switch_next_urcu_qparity(void)
+static void switch_next_rcu_qparity(void)
{
- STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
}
void wait_for_quiescent_state(void)
{
LIST_HEAD(qsreaders);
int wait_loops = 0;
- struct urcu_reader *index, *tmp;
+ struct rcu_reader *index, *tmp;
if (list_empty(&registry))
return;
/*
- * Wait for each thread urcu_reader.ctr count to become 0.
+ * Wait for each thread's rcu_reader.ctr count to become 0.
*/
for (;;) {
wait_loops++;
ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
assert(!ret);
- internal_urcu_lock();
+ internal_rcu_lock();
/* Remove old registry elements */
rcu_gc_registry();
/* All threads should read qparity before accessing data structure
- * where new ptr points to. Must be done within internal_urcu_lock
+ * where new ptr points to. Must be done within internal_rcu_lock
* because it iterates on reader threads. */
/* Write new ptr before changing the qparity */
smp_mb();
- switch_next_urcu_qparity(); /* 0 -> 1 */
+ switch_next_rcu_qparity(); /* 0 -> 1 */
/*
* Must commit qparity update to memory before waiting for parity
*/
smp_mb();
- switch_next_urcu_qparity(); /* 1 -> 0 */
+ switch_next_rcu_qparity(); /* 1 -> 0 */
/*
* Must commit qparity update to memory before waiting for parity
wait_for_quiescent_state(); /* Wait readers in parity 1 */
/* Finish waiting for reader threads before letting the old ptr be
- * freed. Must be done within internal_urcu_lock because it iterates on
+ * freed. Must be done within internal_rcu_lock because it iterates on
* reader threads. */
smp_mb();
- internal_urcu_unlock();
+ internal_rcu_unlock();
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
}
/* Called with signals off and mutex locked */
static void add_thread(void)
{
- struct urcu_reader *urcu_reader_reg;
+ struct rcu_reader *rcu_reader_reg;
if (registry_arena.len
- < registry_arena.used + sizeof(struct urcu_reader))
+ < registry_arena.used + sizeof(struct rcu_reader))
resize_arena(&registry_arena,
max(registry_arena.len << 1, ARENA_INIT_ALLOC));
/*
* Find a free spot.
*/
- for (urcu_reader_reg = registry_arena.p;
- (void *)urcu_reader_reg < registry_arena.p + registry_arena.len;
- urcu_reader_reg++) {
- if (!urcu_reader_reg->alloc)
+ for (rcu_reader_reg = registry_arena.p;
+ (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc)
break;
}
- urcu_reader_reg->alloc = 1;
- registry_arena.used += sizeof(struct urcu_reader);
+ rcu_reader_reg->alloc = 1;
+ registry_arena.used += sizeof(struct rcu_reader);
/* Add to registry */
- urcu_reader_reg->tid = pthread_self();
- assert(urcu_reader_reg->ctr == 0);
- list_add(&urcu_reader_reg->head, &registry);
- urcu_reader = urcu_reader_reg;
+ rcu_reader_reg->tid = pthread_self();
+ assert(rcu_reader_reg->ctr == 0);
+ list_add(&rcu_reader_reg->head, &registry);
+ rcu_reader = rcu_reader_reg;
}
/* Called with signals off and mutex locked */
static void rcu_gc_registry(void)
{
- struct urcu_reader *urcu_reader_reg;
+ struct rcu_reader *rcu_reader_reg;
pthread_t tid;
int ret;
- for (urcu_reader_reg = registry_arena.p;
- (void *)urcu_reader_reg < registry_arena.p + registry_arena.len;
- urcu_reader_reg++) {
- if (!urcu_reader_reg->alloc)
+ for (rcu_reader_reg = registry_arena.p;
+ (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc)
continue;
- tid = urcu_reader_reg->tid;
+ tid = rcu_reader_reg->tid;
ret = pthread_kill(tid, 0);
assert(ret != EINVAL);
if (ret == ESRCH) {
- list_del(&urcu_reader_reg->head);
- urcu_reader_reg->alloc = 0;
- registry_arena.used -= sizeof(struct urcu_reader);
+ list_del(&rcu_reader_reg->head);
+ rcu_reader_reg->alloc = 0;
+ registry_arena.used -= sizeof(struct rcu_reader);
}
}
}
/*
* Check if a signal concurrently registered our thread since
* the check in rcu_read_lock(). */
- if (urcu_reader)
+ if (rcu_reader)
goto end;
- internal_urcu_lock();
+ internal_rcu_lock();
add_thread();
- internal_urcu_unlock();
+ internal_rcu_unlock();
end:
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
}
-void urcu_bp_exit()
+void rcu_bp_exit()
{
munmap(registry_arena.p, registry_arena.len);
}
{
}
-static inline void urcu_init(void)
+static inline void rcu_init(void)
{
}
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"
-void __attribute__((destructor)) urcu_defer_exit(void);
+void __attribute__((destructor)) rcu_defer_exit(void);
extern void synchronize_rcu(void);
/*
- * urcu_defer_mutex nests inside defer_thread_mutex.
+ * rcu_defer_mutex nests inside defer_thread_mutex.
*/
-static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;
static int defer_thread_futex;
static LIST_HEAD(registry);
static pthread_t tid_defer;
-static void internal_urcu_lock(pthread_mutex_t *mutex)
+static void internal_rcu_lock(pthread_mutex_t *mutex)
{
int ret;
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-static void internal_urcu_unlock(pthread_mutex_t *mutex)
+static void internal_rcu_unlock(pthread_mutex_t *mutex)
{
int ret;
unsigned long num_items = 0, head;
struct defer_queue *index;
- internal_urcu_lock(&urcu_defer_mutex);
+ internal_rcu_lock(&rcu_defer_mutex);
list_for_each_entry(index, &registry, list) {
head = LOAD_SHARED(index->head);
num_items += head - index->tail;
}
- internal_urcu_unlock(&urcu_defer_mutex);
+ internal_rcu_unlock(&rcu_defer_mutex);
return num_items;
}
void rcu_defer_barrier_thread(void)
{
- internal_urcu_lock(&urcu_defer_mutex);
+ internal_rcu_lock(&rcu_defer_mutex);
_rcu_defer_barrier_thread();
- internal_urcu_unlock(&urcu_defer_mutex);
+ internal_rcu_unlock(&rcu_defer_mutex);
}
/*
if (list_empty(&registry))
return;
- internal_urcu_lock(&urcu_defer_mutex);
+ internal_rcu_lock(&rcu_defer_mutex);
list_for_each_entry(index, &registry, list) {
index->last_head = LOAD_SHARED(index->head);
num_items += index->last_head - index->tail;
list_for_each_entry(index, &registry, list)
rcu_defer_barrier_queue(index, index->last_head);
end:
- internal_urcu_unlock(&urcu_defer_mutex);
+ internal_rcu_unlock(&rcu_defer_mutex);
}
/*
assert(defer_queue.q == NULL);
defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
- internal_urcu_lock(&defer_thread_mutex);
- internal_urcu_lock(&urcu_defer_mutex);
+ internal_rcu_lock(&defer_thread_mutex);
+ internal_rcu_lock(&rcu_defer_mutex);
was_empty = list_empty(®istry);
list_add(&defer_queue.list, ®istry);
- internal_urcu_unlock(&urcu_defer_mutex);
+ internal_rcu_unlock(&rcu_defer_mutex);
if (was_empty)
start_defer_thread();
- internal_urcu_unlock(&defer_thread_mutex);
+ internal_rcu_unlock(&defer_thread_mutex);
}
void rcu_defer_unregister_thread(void)
{
int is_empty;
- internal_urcu_lock(&defer_thread_mutex);
- internal_urcu_lock(&urcu_defer_mutex);
+ internal_rcu_lock(&defer_thread_mutex);
+ internal_rcu_lock(&rcu_defer_mutex);
list_del(&defer_queue.list);
_rcu_defer_barrier_thread();
free(defer_queue.q);
defer_queue.q = NULL;
is_empty = list_empty(&registry);
- internal_urcu_unlock(&urcu_defer_mutex);
+ internal_rcu_unlock(&rcu_defer_mutex);
if (is_empty)
stop_defer_thread();
- internal_urcu_unlock(&defer_thread_mutex);
+ internal_rcu_unlock(&defer_thread_mutex);
}
-void urcu_defer_exit(void)
+void rcu_defer_exit(void)
{
assert(list_empty(&registry));
}
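For context, a thread using the defer queue follows the register/barrier/unregister lifecycle shown above; a hedged sketch (defer_rcu() as the enqueue call is an assumption, to be verified against urcu-defer.h):

#include <stdlib.h>
#include "urcu-defer.h"

static void free_cb(void *p)	/* runs after a grace period */
{
	free(p);
}

static void retire(void *old)
{
	rcu_defer_register_thread();	/* allocates this thread's defer_queue */
	defer_rcu(free_cb, old);	/* assumption: flavor's enqueue API */
	rcu_defer_barrier_thread();	/* flush our queue before tearing down */
	rcu_defer_unregister_thread();
}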
/*
* If a reader is really non-cooperative and refuses to commit its
- * urcu_reader.ctr count to memory (there is no barrier in the reader
+ * rcu_reader.ctr count to memory (there is no barrier in the reader
* per se), kick it after a few loops waiting for it.
*/
#define KICK_READER_LOOPS 10000
* Using an int rather than a char to eliminate false register dependencies
* causing stalls on some architectures.
*/
-extern unsigned long urcu_gp_ctr;
+extern unsigned long rcu_gp_ctr;
-struct urcu_reader {
+struct rcu_reader {
/* Data used by both reader and synchronize_rcu() */
unsigned long ctr;
/* Data used for registry */
pthread_t tid;
};
-extern struct urcu_reader __thread urcu_reader;
+extern struct rcu_reader __thread rcu_reader;
extern int gp_futex;
if (value == NULL)
return 0;
reader_gp = LOAD_SHARED(*value);
- return reader_gp && ((reader_gp ^ urcu_gp_ctr) & RCU_GP_CTR);
+ return reader_gp && ((reader_gp ^ rcu_gp_ctr) & RCU_GP_CTR);
}
#else /* !(BITS_PER_LONG < 64) */
static inline int rcu_gp_ongoing(unsigned long *value)
if (value == NULL)
return 0;
reader_gp = LOAD_SHARED(*value);
- return reader_gp && (reader_gp - urcu_gp_ctr > ULONG_MAX / 2);
+ return reader_gp && (reader_gp - rcu_gp_ctr > ULONG_MAX / 2);
}
#endif /* !(BITS_PER_LONG < 64) */
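A worked example of the 64-bit test, which relies on unsigned wraparound to order snapshots (values illustrative):

/*
 * Say the writer has advanced the counter past the reader's snapshot:
 *   reader_gp == 1000, rcu_gp_ctr == 1000 + RCU_GP_CTR
 * Then reader_gp - rcu_gp_ctr wraps to roughly ULONG_MAX - RCU_GP_CTR,
 * which is > ULONG_MAX / 2: the reader predates this grace period and
 * must still be waited for. Once the reader copies the current counter
 * (reader_gp == rcu_gp_ctr) the difference is 0, and a reader_gp of 0
 * means the thread is offline; neither holds up the grace period.
 */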
static inline void _rcu_read_lock(void)
{
- rcu_assert(urcu_reader.ctr);
+ rcu_assert(rcu_reader.ctr);
}
static inline void _rcu_read_unlock(void)
static inline void _rcu_quiescent_state(void)
{
smp_mb();
- _STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
- smp_mb(); /* write urcu_reader.ctr before read futex */
+ _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
+ smp_mb(); /* write rcu_reader.ctr before read futex */
wake_up_gp();
smp_mb();
}
static inline void _rcu_thread_offline(void)
{
smp_mb();
- STORE_SHARED(urcu_reader.ctr, 0);
- smp_mb(); /* write urcu_reader.ctr before read futex */
+ STORE_SHARED(rcu_reader.ctr, 0);
+ smp_mb(); /* write rcu_reader.ctr before read futex */
wake_up_gp();
}
static inline void _rcu_thread_online(void)
{
- _STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
+ _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
smp_mb();
}
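Putting the QSBR primitives together, a reader typically announces a quiescent state between lookups; a minimal sketch (the header name and loop body are illustrative):

#include <urcu-qsbr.h>	/* assumption: header of the QSBR flavor */

static void *qsbr_reader(void *arg)
{
	rcu_register_thread();	/* thread starts out online */
	for (;;) {
		rcu_read_lock();	/* nop in QSBR; kept for clarity */
		/* ... read RCU-protected data ... */
		rcu_read_unlock();
		rcu_quiescent_state();	/* no references held past this point */
	}
	return NULL;	/* unreachable; a real loop would unregister */
}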
void __attribute__((destructor)) rcu_exit(void);
-static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
int gp_futex;
/*
* Global grace period counter.
*/
-unsigned long urcu_gp_ctr = RCU_GP_ONLINE;
+unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
/*
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct urcu_reader __thread urcu_reader;
+struct rcu_reader __thread rcu_reader;
#ifdef DEBUG_YIELD
unsigned int yield_active;
static LIST_HEAD(registry);
-static void internal_urcu_lock(void)
+static void internal_rcu_lock(void)
{
int ret;
#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(&urcu_mutex);
+ ret = pthread_mutex_lock(&rcu_mutex);
if (ret) {
perror("Error in pthread mutex lock");
exit(-1);
}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+ while ((ret = pthread_mutex_trylock(&rcu_mutex)) != 0) {
if (ret != EBUSY && ret != EINTR) {
printf("ret = %d, errno = %d\n", ret, errno);
perror("Error in pthread mutex lock");
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-static void internal_urcu_unlock(void)
+static void internal_rcu_unlock(void)
{
int ret;
- ret = pthread_mutex_unlock(&urcu_mutex);
+ ret = pthread_mutex_unlock(&rcu_mutex);
if (ret) {
perror("Error in pthread mutex unlock");
exit(-1);
{
LIST_HEAD(qsreaders);
int wait_loops = 0;
- struct urcu_reader *index, *tmp;
+ struct rcu_reader *index, *tmp;
if (list_empty(&registry))
return;
#if (BITS_PER_LONG < 64)
/*
- * called with urcu_mutex held.
+ * called with rcu_mutex held.
*/
-static void switch_next_urcu_qparity(void)
+static void switch_next_rcu_qparity(void)
{
- STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR);
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
}
void synchronize_rcu(void)
{
unsigned long was_online;
- was_online = urcu_reader.ctr;
+ was_online = rcu_reader.ctr;
/* All threads should read qparity before accessing data structure
* where new ptr points to.
* threads registered as readers.
*/
if (was_online)
- STORE_SHARED(urcu_reader.ctr, 0);
+ STORE_SHARED(rcu_reader.ctr, 0);
- internal_urcu_lock();
+ internal_rcu_lock();
- switch_next_urcu_qparity(); /* 0 -> 1 */
+ switch_next_rcu_qparity(); /* 0 -> 1 */
/*
* Must commit qparity update to memory before waiting for parity
* Ensured by STORE_SHARED and LOAD_SHARED.
*/
- switch_next_urcu_qparity(); /* 1 -> 0 */
+ switch_next_rcu_qparity(); /* 1 -> 0 */
/*
* Must commit qparity update to memory before waiting for parity
*/
wait_for_quiescent_state(); /* Wait readers in parity 1 */
- internal_urcu_unlock();
+ internal_rcu_unlock();
/*
* Finish waiting for reader threads before letting the old ptr be
* freed.
*/
if (was_online)
- _STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
+ _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
smp_mb();
}
#else /* !(BITS_PER_LONG < 64) */
{
unsigned long was_online;
- was_online = urcu_reader.ctr;
+ was_online = rcu_reader.ctr;
/*
* Mark the writer thread offline to make sure we don't wait for
*/
smp_mb();
if (was_online)
- STORE_SHARED(urcu_reader.ctr, 0);
+ STORE_SHARED(rcu_reader.ctr, 0);
- internal_urcu_lock();
- STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + RCU_GP_CTR);
+ internal_rcu_lock();
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
wait_for_quiescent_state();
- internal_urcu_unlock();
+ internal_rcu_unlock();
if (was_online)
- _STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
+ _STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
smp_mb();
}
#endif /* !(BITS_PER_LONG < 64) */
void rcu_register_thread(void)
{
- urcu_reader.tid = pthread_self();
- assert(urcu_reader.ctr == 0);
+ rcu_reader.tid = pthread_self();
+ assert(rcu_reader.ctr == 0);
- internal_urcu_lock();
- list_add(&urcu_reader.head, &registry);
- internal_urcu_unlock();
+ internal_rcu_lock();
+ list_add(&rcu_reader.head, &registry);
+ internal_rcu_unlock();
_rcu_thread_online();
}
* with a waiting writer.
*/
_rcu_thread_offline();
- internal_urcu_lock();
- list_del(&urcu_reader.head);
- internal_urcu_unlock();
+ internal_rcu_lock();
+ list_del(&rcu_reader.head);
+ internal_rcu_unlock();
}
void rcu_exit(void)
* rcu_read_unlock()
*
* Mark the beginning and end of a read-side critical section.
- * DON'T FORGET TO USE RCU_REGISTER/UNREGISTER_THREAD() FOR EACH THREAD WITH
- * READ-SIDE CRITICAL SECTION.
+ * DON'T FORGET TO USE rcu_register_thread/rcu_unregister_thread() FOR EACH
+ * THREAD WITH READ-SIDE CRITICAL SECTION.
*/
#define rcu_read_lock() _rcu_read_lock()
#define rcu_read_unlock() _rcu_read_unlock()
* QSBR read lock/unlock are guaranteed to be no-ops. Therefore, we expose them
* in the LGPL header for any code to use. However, the debug versions are not
* nops and may contain sanity checks. To activate them, applications must be
- * recompiled with -DURCU_DEBUG (even non-LGPL/GPL applications). This is the
+ * recompiled with -DRCU_DEBUG (even non-LGPL/GPL applications). This is the
* best trade-off between license/performance/code triviality and
* library debugging & tracing features we could come up with.
*/
-#if (!defined(BUILD_QSBR_LIB) && defined(URCU_DEBUG))
+#if (!defined(BUILD_QSBR_LIB) && defined(RCU_DEBUG))
static inline void rcu_read_lock(void)
{
{
}
-#else /* !URCU_DEBUG */
+#else /* !RCU_DEBUG */
extern void rcu_read_lock(void);
extern void rcu_read_unlock(void);
-#endif /* !URCU_DEBUG */
+#endif /* !RCU_DEBUG */
extern void rcu_quiescent_state(void);
extern void rcu_thread_offline(void);
/*
* The signal number used by the RCU library can be overridden with
- * -DSIGURCU= when compiling the library.
+ * -DSIGRCU= when compiling the library.
*/
-#ifndef SIGURCU
-#define SIGURCU SIGUSR1
+#ifndef SIGRCU
+#define SIGRCU SIGUSR1
#endif
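For example, an application that already uses SIGUSR1 could rebuild the library against another signal; a sketch (flag value illustrative):

/*
 * cc -DSIGRCU=SIGUSR2 -c urcu.c ...
 * sigrcu_handler is then installed on SIGUSR2 instead of the
 * SIGUSR1 default above.
 */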
/*
* If a reader is really non-cooperative and refuses to commit its
- * urcu_active_readers count to memory (there is no barrier in the reader
+ * rcu_active_readers count to memory (there is no barrier in the reader
* per se), kick it after a few loops waiting for it.
*/
#define KICK_READER_LOOPS 10000
#define YIELD_WRITE (1 << 1)
/*
- * Updates without URCU_MB are much slower. Account this in
+ * Updates without RCU_MB are much slower. Account this in
* the delay.
*/
-#ifdef URCU_MB
+#ifdef RCU_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
}
#endif
-#ifdef URCU_MB
+#ifdef RCU_MB
static inline void reader_barrier()
{
smp_mb();
#endif
/*
- * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
- * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
+ * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
*/
#define RCU_GP_COUNT (1UL << 0)
/* Use a number of bits equal to half the architecture's long size */
-#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
-#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+#define RCU_GP_CTR_PHASE (1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
/*
* Global quiescent period counter with low-order bits unused.
* Using an int rather than a char to eliminate false register dependencies
* causing stalls on some architectures.
*/
-extern long urcu_gp_ctr;
+extern long rcu_gp_ctr;
-struct urcu_reader {
+struct rcu_reader {
/* Data used by both reader and synchronize_rcu() */
long ctr;
char need_mb;
pthread_t tid;
};
-extern struct urcu_reader __thread urcu_reader;
+extern struct rcu_reader __thread rcu_reader;
extern int gp_futex;
*/
v = LOAD_SHARED(*value);
return (v & RCU_GP_CTR_NEST_MASK) &&
- ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+ ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
static inline void _rcu_read_lock(void)
{
long tmp;
- tmp = urcu_reader.ctr;
- /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+ tmp = rcu_reader.ctr;
+ /*
+ * rcu_gp_ctr is
+ * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
+ */
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
+ _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer. See force_mb_all_threads().
*/
reader_barrier();
} else {
- _STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
+ _STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
}
}
{
long tmp;
- tmp = urcu_reader.ctr;
+ tmp = rcu_reader.ctr;
/*
* Finish using rcu before decrementing the pointer.
* See force_mb_all_threads().
*/
if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
reader_barrier();
- _STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
- /* write urcu_reader.ctr before read futex */
+ _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+ /* write rcu_reader.ctr before read futex */
reader_barrier();
wake_up_gp();
} else {
- _STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
+ _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
}
}
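To see the counter arithmetic at work, a walkthrough of rcu_reader.ctr across a nested read-side section (a worked example; assume rcu_gp_ctr == RCU_GP_CTR_PHASE | RCU_GP_COUNT):

/*
 * rcu_read_lock()    outermost: nest mask was 0, snapshot rcu_gp_ctr
 *                      -> ctr == RCU_GP_CTR_PHASE | 1
 * rcu_read_lock()    nested: nest mask nonzero, add RCU_GP_COUNT
 *                      -> ctr == RCU_GP_CTR_PHASE | 2
 * rcu_read_unlock()  nested: subtract -> ctr == RCU_GP_CTR_PHASE | 1
 * rcu_read_unlock()  outermost: nest mask == RCU_GP_COUNT, so subtract
 *                    and wake a waiting writer -> ctr == RCU_GP_CTR_PHASE
 * With the nest mask back at 0, rcu_old_gp_ongoing() ignores this reader.
 */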
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"
-#ifndef URCU_MB
+#ifndef RCU_MB
static int init_done;
-void __attribute__((constructor)) urcu_init(void);
-void __attribute__((destructor)) urcu_exit(void);
+void __attribute__((constructor)) rcu_init(void);
+void __attribute__((destructor)) rcu_exit(void);
#else
-void urcu_init(void)
+void rcu_init(void)
{
}
#endif
-static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
int gp_futex;
/*
* Global grace period counter.
- * Contains the current RCU_GP_CTR_BIT.
+ * Contains the current RCU_GP_CTR_PHASE.
* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
-long urcu_gp_ctr = RCU_GP_COUNT;
+long rcu_gp_ctr = RCU_GP_COUNT;
/*
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct urcu_reader __thread urcu_reader;
+struct rcu_reader __thread rcu_reader;
#ifdef DEBUG_YIELD
unsigned int yield_active;
static LIST_HEAD(registry);
-static void internal_urcu_lock(void)
+static void internal_rcu_lock(void)
{
int ret;
#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(&urcu_mutex);
+ ret = pthread_mutex_lock(&rcu_mutex);
if (ret) {
perror("Error in pthread mutex lock");
exit(-1);
}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+ while ((ret = pthread_mutex_trylock(&rcu_mutex)) != 0) {
if (ret != EBUSY && ret != EINTR) {
printf("ret = %d, errno = %d\n", ret, errno);
perror("Error in pthread mutex lock");
exit(-1);
}
- if (urcu_reader.need_mb) {
+ if (rcu_reader.need_mb) {
smp_mb();
- urcu_reader.need_mb = 0;
+ rcu_reader.need_mb = 0;
smp_mb();
}
poll(NULL, 0, 10);
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
-static void internal_urcu_unlock(void)
+static void internal_rcu_unlock(void)
{
int ret;
- ret = pthread_mutex_unlock(&urcu_mutex);
+ ret = pthread_mutex_unlock(&rcu_mutex);
if (ret) {
perror("Error in pthread mutex unlock");
exit(-1);
}
/*
- * called with urcu_mutex held.
+ * called with rcu_mutex held.
*/
-static void switch_next_urcu_qparity(void)
+static void switch_next_rcu_qparity(void)
{
- STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
}
-#ifdef URCU_MB
+#ifdef RCU_MB
#if 0 /* unused */
-static void force_mb_single_thread(struct urcu_reader *index)
+static void force_mb_single_thread(struct rcu_reader *index)
{
smp_mb();
}
{
smp_mb();
}
-#else /* #ifdef URCU_MB */
+#else /* #ifdef RCU_MB */
#if 0 /* unused */
-static void force_mb_single_thread(struct urcu_reader *index)
+static void force_mb_single_thread(struct rcu_reader *index)
{
assert(!list_empty(&registry));
/*
*/
index->need_mb = 1;
smp_mc(); /* write ->need_mb before sending the signals */
- pthread_kill(index->tid, SIGURCU);
+ pthread_kill(index->tid, SIGRCU);
smp_mb();
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
static void force_mb_all_threads(void)
{
- struct urcu_reader *index;
+ struct rcu_reader *index;
/*
* Ask each thread to execute an smp_mb() so we can consider the
list_for_each_entry(index, &registry, head) {
index->need_mb = 1;
smp_mc(); /* write need_mb before sending the signal */
- pthread_kill(index->tid, SIGURCU);
+ pthread_kill(index->tid, SIGRCU);
}
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
*/
list_for_each_entry(index, &registry, head) {
while (index->need_mb) {
- pthread_kill(index->tid, SIGURCU);
+ pthread_kill(index->tid, SIGRCU);
poll(NULL, 0, 1);
}
}
smp_mb(); /* read ->need_mb before ending the barrier */
}
-#endif /* #else #ifdef URCU_MB */
+#endif /* #else #ifdef RCU_MB */
/*
* synchronize_rcu() waiting. Single thread.
{
LIST_HEAD(qsreaders);
int wait_loops = 0;
- struct urcu_reader *index, *tmp;
+ struct rcu_reader *index, *tmp;
if (list_empty(&registry))
return;
/*
- * Wait for each thread urcu_reader.ctr count to become 0.
+ * Wait for each thread's rcu_reader.ctr count to become 0.
*/
for (;;) {
wait_loops++;
#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
* BUSY-LOOP. Force the reader thread to commit its
- * urcu_reader.ctr update to memory if we wait for too long.
+ * rcu_reader.ctr update to memory if we wait for too long.
*/
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
void synchronize_rcu(void)
{
- internal_urcu_lock();
+ internal_rcu_lock();
/* All threads should read qparity before accessing data structure
- * where new ptr points to. Must be done within internal_urcu_lock
+ * where new ptr points to. Must be done within internal_rcu_lock
* because it iterates on reader threads. */
/* Write new ptr before changing the qparity */
force_mb_all_threads();
- switch_next_urcu_qparity(); /* 0 -> 1 */
+ switch_next_rcu_qparity(); /* 0 -> 1 */
/*
* Must commit qparity update to memory before waiting for parity
*/
smp_mb();
- switch_next_urcu_qparity(); /* 1 -> 0 */
+ switch_next_rcu_qparity(); /* 1 -> 0 */
/*
* Must commit qparity update to memory before waiting for parity
wait_for_quiescent_state(); /* Wait readers in parity 1 */
/* Finish waiting for reader threads before letting the old ptr be
- * freed. Must be done within internal_urcu_lock because it iterates on
+ * freed. Must be done within internal_rcu_lock because it iterates on
* reader threads. */
force_mb_all_threads();
- internal_urcu_unlock();
+ internal_rcu_unlock();
}
/*
void rcu_register_thread(void)
{
- urcu_reader.tid = pthread_self();
- assert(urcu_reader.need_mb == 0);
- assert(urcu_reader.ctr == 0);
-
- internal_urcu_lock();
- urcu_init(); /* In case gcc does not support constructor attribute */
- list_add(&urcu_reader.head, &registry);
- internal_urcu_unlock();
+ rcu_reader.tid = pthread_self();
+ assert(rcu_reader.need_mb == 0);
+ assert(rcu_reader.ctr == 0);
+
+ internal_rcu_lock();
+ rcu_init(); /* In case gcc does not support constructor attribute */
+ list_add(&rcu_reader.head, &registry);
+ internal_rcu_unlock();
}
void rcu_unregister_thread(void)
{
- internal_urcu_lock();
- list_del(&urcu_reader.head);
- internal_urcu_unlock();
+ internal_rcu_lock();
+ list_del(&rcu_reader.head);
+ internal_rcu_unlock();
}
-#ifndef URCU_MB
-static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
+#ifndef RCU_MB
+static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
/*
* Executing this smp_mb() is the only purpose of this signal handler.
* It punctually promotes barrier() into smp_mb() on every thread it is
* executed on.
*/
smp_mb();
- urcu_reader.need_mb = 0;
+ rcu_reader.need_mb = 0;
smp_mb();
}
/*
- * urcu_init constructor. Called when the library is linked, but also when
+ * rcu_init constructor. Called when the library is linked, but also when
* reader threads are calling rcu_register_thread().
* Should only be called by a single thread at a given time. This is ensured by
- * holing the internal_urcu_lock() from rcu_register_thread() or by running at
+ * holding the internal_rcu_lock() from rcu_register_thread() or by running at
* library load time, which should not be executed by multiple threads nor
* concurrently with rcu_register_thread() anyway.
*/
-void urcu_init(void)
+void rcu_init(void)
{
struct sigaction act;
int ret;
return;
init_done = 1;
- act.sa_sigaction = sigurcu_handler;
+ act.sa_sigaction = sigrcu_handler;
act.sa_flags = SA_SIGINFO | SA_RESTART;
sigemptyset(&act.sa_mask);
- ret = sigaction(SIGURCU, &act, NULL);
+ ret = sigaction(SIGRCU, &act, NULL);
if (ret) {
perror("Error in sigaction");
exit(-1);
}
}
-void urcu_exit(void)
+void rcu_exit(void)
{
struct sigaction act;
int ret;
- ret = sigaction(SIGURCU, NULL, &act);
+ ret = sigaction(SIGRCU, NULL, &act);
if (ret) {
perror("Error in sigaction");
exit(-1);
}
- assert(act.sa_sigaction == sigurcu_handler);
+ assert(act.sa_sigaction == sigrcu_handler);
assert(list_empty(&registry));
}
-#endif /* #ifndef URCU_MB */
+#endif /* #ifndef RCU_MB */
extern void rcu_unregister_thread(void);
/*
- * Explicit urcu initialization, for "early" use within library constructors.
+ * Explicit rcu initialization, for "early" use within library constructors.
*/
-extern void urcu_init(void);
+extern void rcu_init(void);
#ifdef __cplusplus
}
#define rmc() barrier()
#define wmc() barrier()
-#ifdef CONFIG_URCU_SMP
+#ifdef CONFIG_RCU_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define rmc() barrier()
#define wmc() barrier()
-#ifdef CONFIG_URCU_SMP
+#ifdef CONFIG_RCU_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define rmc() barrier()
#define wmc() barrier()
-#ifdef CONFIG_URCU_SMP
+#ifdef CONFIG_RCU_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define CACHE_LINE_SIZE 128
-#ifdef CONFIG_URCU_HAVE_FENCE
+#ifdef CONFIG_RCU_HAVE_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence"::: "memory")
#define rmc() barrier()
#define wmc() barrier()
-#ifdef CONFIG_URCU_SMP
+#ifdef CONFIG_RCU_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
/* urcu/config.h.in. Manually generated for control over the contained defs. */
/* Defined when on a system that has memory fence instructions. */
-#undef CONFIG_URCU_HAVE_FENCE
+#undef CONFIG_RCU_HAVE_FENCE
/* Defined when on a system with futex support. */
-#undef CONFIG_URCU_HAVE_FUTEX
+#undef CONFIG_RCU_HAVE_FUTEX
/* Enable SMP support. With SMP support enabled, uniprocessors are also
supported. With SMP support disabled, UP systems work fine, but the
behavior of SMP systems is undefined. */
-#undef CONFIG_URCU_SMP
+#undef CONFIG_RCU_SMP
/* Compatibility mode for i386 which lacks cmpxchg instruction. */
-#undef CONFIG_URCU_COMPAT_ARCH
+#undef CONFIG_RCU_COMPAT_ARCH
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
-extern int __urcu_cas_avail;
-extern int __urcu_cas_init(void);
+#if ((BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
#define UATOMIC_COMPAT(insn) \
- ((likely(__urcu_cas_avail > 0)) \
+ ((likely(__rcu_cas_avail > 0)) \
? (_uatomic_##insn) \
- : ((unlikely(__urcu_cas_avail < 0) \
- ? ((__urcu_cas_init() > 0) \
+ : ((unlikely(__rcu_cas_avail < 0) \
+ ? ((__rcu_cas_init() > 0) \
? (_uatomic_##insn) \
: (compat_uatomic_##insn)) \
: (compat_uatomic_##insn))))
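In effect each primitive probes for cmpxchg on first use and then sticks with the fast or compat path; a hedged sketch of how one wrapped op would be declared (it mirrors the pattern above, not necessarily the exact header text):

#define uatomic_cmpxchg(addr, old, _new)			\
	UATOMIC_COMPAT(cmpxchg(addr, old, _new))
/*
 * First call: __rcu_cas_avail == -1, so __rcu_cas_init() probes the CPU.
 * Afterwards either _uatomic_cmpxchg (real instruction) or
 * compat_uatomic_cmpxchg (mutex-protected fallback) is chosen directly.
 */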
* on the wait-side in compatibility mode.
*/
-#ifdef CONFIG_URCU_HAVE_FUTEX
+#ifdef CONFIG_RCU_HAVE_FUTEX
#include <sys/syscall.h>
#define futex(...) syscall(__NR_futex, __VA_ARGS__)
#define futex_noasync(uaddr, op, val, timeout, uaddr2, val3) \
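The grace-period code pairs this wrapper with a wait/wake convention on gp_futex; a simplified sketch of both sides (distilled from wait_for_quiescent_state() and wake_up_gp(); uatomic_* and smp_mb() come from the library's arch headers, error handling elided):

#include <linux/futex.h>	/* FUTEX_WAIT, FUTEX_WAKE */

extern int gp_futex;

static void writer_wait_sketch(void)	/* writer, after spinning a while */
{
	uatomic_dec(&gp_futex);		/* announce a sleeper: value becomes -1 */
	smp_mb();			/* write futex before re-reading reader state */
	/* ... re-check that some reader is still active, then: */
	futex_noasync(&gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);
}

static void reader_wake_sketch(void)	/* reader leaving its critical section */
{
	if (uatomic_read(&gp_futex) == -1) {
		uatomic_set(&gp_futex, 0);
		futex_noasync(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}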