gp_futex, yield_active, rand_yield, has_sys_membarrier, rcu_defer_exit,
call_rcu_data_free, call_rcu_before_fork, call_rcu_after_fork_parent,
call_rcu_after_fork_child are exported by each urcu flavor.
In order to fix use-cases where multiple flavors are statically linked
into the same application, we need to move these symbols to local
namespaces.
Ensure that all symbols are prefixed by "rcu_".
Also add each of those symbols into urcu/map/*.h headers, so they get
mapped to their flavor-specific symbol name by the preprocessor.
This requires bumping our .so version from 1.0.0 to 2.0.0, because it
changes some symbol names.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
19 files changed:
# Following the numbering scheme proposed by libtool for the library version
# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
# Following the numbering scheme proposed by libtool for the library version
# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
-AC_SUBST([URCU_LIBRARY_VERSION], [1:0:0])
+AC_SUBST([URCU_LIBRARY_VERSION], [2:0:0])
AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([config])
AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([config])
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
-#define debug_yield_read()
+#define rcu_debug_yield_read()
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
-#define debug_yield_read()
+#define rcu_debug_yield_read()
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
-#define debug_yield_read()
+#define rcu_debug_yield_read()
#endif
#include <urcu-bp.h>
#endif
#include <urcu-bp.h>
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
-#define debug_yield_read()
+#define rcu_debug_yield_read()
#endif
#include <urcu.h>
#include <urcu-defer.h>
#endif
#include <urcu.h>
#include <urcu-defer.h>
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
-#define debug_yield_read()
+#define rcu_debug_yield_read()
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
} else {
URCU_TLS(lookup_ok)++;
}
} else {
URCU_TLS(lookup_ok)++;
}
+ rcu_debug_yield_read();
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
+ rcu_debug_yield_read();
if (caa_unlikely(rduration))
loop_sleep(rduration);
URCU_TLS(nr_reads)++;
if (caa_unlikely(rduration))
loop_sleep(rduration);
URCU_TLS(nr_reads)++;
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
-#define debug_yield_read()
+#define rcu_debug_yield_read()
#endif
#include "urcu-qsbr.h"
#endif
#include "urcu-qsbr.h"
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
for (;;) {
_rcu_read_lock();
local_ptr = _rcu_dereference(test_rcu_pointer);
for (;;) {
_rcu_read_lock();
local_ptr = _rcu_dereference(test_rcu_pointer);
+ rcu_debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
if (local_ptr)
assert(local_ptr->a == 8);
if (caa_unlikely(rduration))
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
switch (argv[i][1]) {
#ifdef DEBUG_YIELD
case 'r':
- yield_active |= YIELD_READ;
+ rcu_yield_active |= RCU_YIELD_READ;
- yield_active |= YIELD_WRITE;
+ rcu_yield_active |= RCU_YIELD_WRITE;
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef DEBUG_YIELD
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef DEBUG_YIELD
-unsigned int yield_active;
-DEFINE_URCU_TLS(unsigned int, rand_yield);
+unsigned int rcu_yield_active;
+DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* Global grace period counter.
/*
* Global grace period counter.
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
-unsigned int yield_active;
-DEFINE_URCU_TLS(unsigned int, rand_yield);
+unsigned int rcu_yield_active;
+DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
#endif
static CDS_LIST_HEAD(registry);
#endif
static CDS_LIST_HEAD(registry);
{
/* Read reader_gp before read futex */
cmm_smp_rmb();
{
/* Read reader_gp before read futex */
cmm_smp_rmb();
- if (uatomic_read(&gp_futex) == -1)
- futex_noasync(&gp_futex, FUTEX_WAIT, -1,
+ if (uatomic_read(&rcu_gp_futex) == -1)
+ futex_noasync(&rcu_gp_futex, FUTEX_WAIT, -1,
for (;;) {
wait_loops++;
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
for (;;) {
wait_loops++;
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
- uatomic_set(&gp_futex, -1);
+ uatomic_set(&rcu_gp_futex, -1);
/*
* Write futex before write waiting (the other side
* reads them in the opposite order).
/*
* Write futex before write waiting (the other side
* reads them in the opposite order).
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
cmm_smp_mb();
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
cmm_smp_mb();
- uatomic_set(&gp_futex, 0);
+ uatomic_set(&rcu_gp_futex, 0);
#ifdef RCU_MEMBARRIER
static int init_done;
#ifdef RCU_MEMBARRIER
static int init_done;
+int rcu_has_sys_membarrier;
void __attribute__((constructor)) rcu_init(void);
#endif
void __attribute__((constructor)) rcu_init(void);
#endif
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* Global grace period counter.
/*
* Global grace period counter.
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
-unsigned int yield_active;
-DEFINE_URCU_TLS(unsigned int, rand_yield);
+unsigned int rcu_yield_active;
+DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
#endif
static CDS_LIST_HEAD(registry);
#endif
static CDS_LIST_HEAD(registry);
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
- if (caa_likely(has_sys_membarrier))
+ if (caa_likely(rcu_has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
else
cmm_smp_mb();
membarrier(MEMBARRIER_EXPEDITED);
else
cmm_smp_mb();
{
/* Read reader_gp before read futex */
smp_mb_master(RCU_MB_GROUP);
{
/* Read reader_gp before read futex */
smp_mb_master(RCU_MB_GROUP);
- if (uatomic_read(&gp_futex) == -1)
- futex_async(&gp_futex, FUTEX_WAIT, -1,
+ if (uatomic_read(&rcu_gp_futex) == -1)
+ futex_async(&rcu_gp_futex, FUTEX_WAIT, -1,
for (;;) {
wait_loops++;
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
for (;;) {
wait_loops++;
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
- uatomic_dec(&gp_futex);
+ uatomic_dec(&rcu_gp_futex);
/* Write futex before read reader_gp */
smp_mb_master(RCU_MB_GROUP);
}
/* Write futex before read reader_gp */
smp_mb_master(RCU_MB_GROUP);
}
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
smp_mb_master(RCU_MB_GROUP);
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
smp_mb_master(RCU_MB_GROUP);
- uatomic_set(&gp_futex, 0);
+ uatomic_set(&rcu_gp_futex, 0);
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
smp_mb_master(RCU_MB_GROUP);
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
smp_mb_master(RCU_MB_GROUP);
- uatomic_set(&gp_futex, 0);
+ uatomic_set(&rcu_gp_futex, 0);
return;
init_done = 1;
if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
return;
init_done = 1;
if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
- has_sys_membarrier = 1;
+ rcu_has_sys_membarrier = 1;
#define synchronize_rcu synchronize_rcu_bp
#define rcu_reader rcu_reader_bp
#define rcu_gp_ctr rcu_gp_ctr_bp
#define synchronize_rcu synchronize_rcu_bp
#define rcu_reader rcu_reader_bp
#define rcu_gp_ctr rcu_gp_ctr_bp
+#define rcu_gp_futex rcu_gp_futex_bp /* unused */
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_bp
#define get_call_rcu_thread get_call_rcu_thread_bp
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_bp
#define get_call_rcu_thread get_call_rcu_thread_bp
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_bp
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_bp
#define call_rcu call_rcu_bp
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_bp
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_bp
#define call_rcu call_rcu_bp
+#define call_rcu_data_free call_rcu_data_free_bp
+#define call_rcu_before_fork call_rcu_before_fork_bp
+#define call_rcu_after_fork_parent call_rcu_after_fork_parent_bp
+#define call_rcu_after_fork_child call_rcu_after_fork_child_bp
#define defer_rcu defer_rcu_bp
#define rcu_defer_register_thread rcu_defer_register_thread_bp
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_bp
#define rcu_defer_barrier rcu_defer_barrier_bp
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_bp
#define defer_rcu defer_rcu_bp
#define rcu_defer_register_thread rcu_defer_register_thread_bp
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_bp
#define rcu_defer_barrier rcu_defer_barrier_bp
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_bp
+#define rcu_defer_exit rcu_defer_exit_bp
#define rcu_flavor rcu_flavor_bp
#define rcu_flavor rcu_flavor_bp
+#define rcu_yield_active rcu_yield_active_bp
+#define rcu_rand_yield rcu_rand_yield_bp
+
#endif /* _URCU_BP_MAP_H */
#endif /* _URCU_BP_MAP_H */
#define synchronize_rcu synchronize_rcu_qsbr
#define rcu_reader rcu_reader_qsbr
#define rcu_gp_ctr rcu_gp_ctr_qsbr
#define synchronize_rcu synchronize_rcu_qsbr
#define rcu_reader rcu_reader_qsbr
#define rcu_gp_ctr rcu_gp_ctr_qsbr
+#define rcu_gp_futex rcu_gp_futex_qsbr
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_qsbr
#define get_call_rcu_thread get_call_rcu_thread_qsbr
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_qsbr
#define get_call_rcu_thread get_call_rcu_thread_qsbr
#define set_thread_call_rcu_data set_thread_call_rcu_data_qsbr
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_qsbr
#define call_rcu call_rcu_qsbr
#define set_thread_call_rcu_data set_thread_call_rcu_data_qsbr
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_qsbr
#define call_rcu call_rcu_qsbr
+#define call_rcu_data_free call_rcu_data_free_qsbr
+#define call_rcu_before_fork call_rcu_before_fork_qsbr
+#define call_rcu_after_fork_parent call_rcu_after_fork_parent_qsbr
+#define call_rcu_after_fork_child call_rcu_after_fork_child_qsbr
#define defer_rcu defer_rcu_qsbr
#define rcu_defer_register_thread rcu_defer_register_thread_qsbr
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_qsbr
#define rcu_defer_barrier rcu_defer_barrier_qsbr
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_qsbr
#define defer_rcu defer_rcu_qsbr
#define rcu_defer_register_thread rcu_defer_register_thread_qsbr
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_qsbr
#define rcu_defer_barrier rcu_defer_barrier_qsbr
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_qsbr
+#define rcu_defer_exit rcu_defer_exit_qsbr
#define rcu_flavor rcu_flavor_qsbr
#define rcu_flavor rcu_flavor_qsbr
+#define rcu_yield_active rcu_yield_active_qsbr
+#define rcu_rand_yield rcu_rand_yield_qsbr
+
#endif /* _URCU_QSBR_MAP_H */
#endif /* _URCU_QSBR_MAP_H */
#define synchronize_rcu synchronize_rcu_memb
#define rcu_reader rcu_reader_memb
#define rcu_gp_ctr rcu_gp_ctr_memb
#define synchronize_rcu synchronize_rcu_memb
#define rcu_reader rcu_reader_memb
#define rcu_gp_ctr rcu_gp_ctr_memb
+#define rcu_gp_futex rcu_gp_futex_memb
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_memb
#define get_call_rcu_thread get_call_rcu_thread_memb
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_memb
#define get_call_rcu_thread get_call_rcu_thread_memb
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_memb
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_memb
#define call_rcu call_rcu_memb
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_memb
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_memb
#define call_rcu call_rcu_memb
+#define call_rcu_data_free call_rcu_data_free_memb
+#define call_rcu_before_fork call_rcu_before_fork_memb
+#define call_rcu_after_fork_parent call_rcu_after_fork_parent_memb
+#define call_rcu_after_fork_child call_rcu_after_fork_child_memb
#define defer_rcu defer_rcu_memb
#define rcu_defer_register_thread rcu_defer_register_thread_memb
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_memb
#define rcu_defer_barrier rcu_defer_barrier_memb
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_memb
#define defer_rcu defer_rcu_memb
#define rcu_defer_register_thread rcu_defer_register_thread_memb
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_memb
#define rcu_defer_barrier rcu_defer_barrier_memb
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_memb
+#define rcu_defer_exit rcu_defer_exit_memb
#define rcu_flavor rcu_flavor_memb
#define rcu_flavor rcu_flavor_memb
+#define rcu_yield_active rcu_yield_active_memb
+#define rcu_rand_yield rcu_rand_yield_memb
+
+/* Specific to MEMBARRIER flavor */
+#define rcu_has_sys_membarrier rcu_has_sys_membarrier_memb
+
#elif defined(RCU_SIGNAL)
#define rcu_read_lock rcu_read_lock_sig
#elif defined(RCU_SIGNAL)
#define rcu_read_lock rcu_read_lock_sig
#define synchronize_rcu synchronize_rcu_sig
#define rcu_reader rcu_reader_sig
#define rcu_gp_ctr rcu_gp_ctr_sig
#define synchronize_rcu synchronize_rcu_sig
#define rcu_reader rcu_reader_sig
#define rcu_gp_ctr rcu_gp_ctr_sig
+#define rcu_gp_futex rcu_gp_futex_sig
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_sig
#define get_call_rcu_thread get_call_rcu_thread_sig
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_sig
#define get_call_rcu_thread get_call_rcu_thread_sig
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_sig
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_sig
#define call_rcu call_rcu_sig
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_sig
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_sig
#define call_rcu call_rcu_sig
+#define call_rcu_data_free call_rcu_data_free_sig
+#define call_rcu_before_fork call_rcu_before_fork_sig
+#define call_rcu_after_fork_parent call_rcu_after_fork_parent_sig
+#define call_rcu_after_fork_child call_rcu_after_fork_child_sig
#define defer_rcu defer_rcu_sig
#define rcu_defer_register_thread rcu_defer_register_thread_sig
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_sig
#define rcu_defer_barrier rcu_defer_barrier_sig
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_sig
#define defer_rcu defer_rcu_sig
#define rcu_defer_register_thread rcu_defer_register_thread_sig
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_sig
#define rcu_defer_barrier rcu_defer_barrier_sig
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_sig
+#define rcu_defer_exit rcu_defer_exit_sig
#define rcu_flavor rcu_flavor_sig
#define rcu_flavor rcu_flavor_sig
+#define rcu_yield_active rcu_yield_active_sig
+#define rcu_rand_yield rcu_rand_yield_sig
+
#elif defined(RCU_MB)
#define rcu_read_lock rcu_read_lock_mb
#elif defined(RCU_MB)
#define rcu_read_lock rcu_read_lock_mb
#define synchronize_rcu synchronize_rcu_mb
#define rcu_reader rcu_reader_mb
#define rcu_gp_ctr rcu_gp_ctr_mb
#define synchronize_rcu synchronize_rcu_mb
#define rcu_reader rcu_reader_mb
#define rcu_gp_ctr rcu_gp_ctr_mb
+#define rcu_gp_futex rcu_gp_futex_mb
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_mb
#define get_call_rcu_thread get_call_rcu_thread_mb
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_mb
#define get_call_rcu_thread get_call_rcu_thread_mb
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_mb
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_mb
#define call_rcu call_rcu_mb
#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_mb
#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_mb
#define call_rcu call_rcu_mb
+#define call_rcu_data_free call_rcu_data_free_mb
+#define call_rcu_before_fork call_rcu_before_fork_mb
+#define call_rcu_after_fork_parent call_rcu_after_fork_parent_mb
+#define call_rcu_after_fork_child call_rcu_after_fork_child_mb
#define defer_rcu defer_rcu_mb
#define rcu_defer_register_thread rcu_defer_register_thread_mb
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_mb
#define rcu_defer_barrier rcu_defer_barrier_mb
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_mb
#define defer_rcu defer_rcu_mb
#define rcu_defer_register_thread rcu_defer_register_thread_mb
#define rcu_defer_unregister_thread rcu_defer_unregister_thread_mb
#define rcu_defer_barrier rcu_defer_barrier_mb
#define rcu_defer_barrier_thread rcu_defer_barrier_thread_mb
+#define rcu_defer_exit rcu_defer_exit_mb
#define rcu_flavor rcu_flavor_mb
#define rcu_flavor rcu_flavor_mb
+#define rcu_yield_active rcu_yield_active_mb
+#define rcu_rand_yield rcu_rand_yield_mb
+
#else
#error "Undefined selection"
#else
#error "Undefined selection"
#include <pthread.h>
#include <unistd.h>
#include <pthread.h>
#include <unistd.h>
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
+#define RCU_YIELD_READ (1 << 0)
+#define RCU_YIELD_WRITE (1 << 1)
/*
* Updates without RCU_MB are much slower. Account this in
/*
* Updates without RCU_MB are much slower. Account this in
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
-extern unsigned int yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rand_yield);
+extern unsigned int rcu_yield_active;
+extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
- if (yield_active & YIELD_READ)
- if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
- usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+ if (rcu_yield_active & RCU_YIELD_READ)
+ if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
- if (yield_active & YIELD_WRITE)
- if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
- usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+ if (rcu_yield_active & RCU_YIELD_WRITE)
+ if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
- URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
+ URCU_TLS(rcu_rand_yield) = time(NULL) ^ pthread_self();
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
#include <pthread.h>
#include <unistd.h>
#include <pthread.h>
#include <unistd.h>
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
+#define RCU_YIELD_READ (1 << 0)
+#define RCU_YIELD_WRITE (1 << 1)
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
-extern unsigned int yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rand_yield);
+extern unsigned int rcu_yield_active;
+extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
- if (yield_active & YIELD_READ)
- if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
- usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+ if (rcu_yield_active & RCU_YIELD_READ)
+ if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
- if (yield_active & YIELD_WRITE)
- if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
- usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+ if (rcu_yield_active & RCU_YIELD_WRITE)
+ if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
- URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
+ URCU_TLS(rcu_rand_yield) = time(NULL) ^ pthread_self();
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
-extern int32_t gp_futex;
+extern int32_t rcu_gp_futex;
/*
* Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
/*
* Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
if (caa_unlikely(_CMM_LOAD_SHARED(URCU_TLS(rcu_reader).waiting))) {
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).waiting, 0);
cmm_smp_mb();
if (caa_unlikely(_CMM_LOAD_SHARED(URCU_TLS(rcu_reader).waiting))) {
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).waiting, 0);
cmm_smp_mb();
- if (uatomic_read(&gp_futex) != -1)
+ if (uatomic_read(&rcu_gp_futex) != -1)
- uatomic_set(&gp_futex, 0);
- futex_noasync(&gp_futex, FUTEX_WAKE, 1,
+ uatomic_set(&rcu_gp_futex, 0);
+ futex_noasync(&rcu_gp_futex, FUTEX_WAKE, 1,
#include <pthread.h>
#include <unistd.h>
#include <pthread.h>
#include <unistd.h>
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
+#define RCU_YIELD_READ (1 << 0)
+#define RCU_YIELD_WRITE (1 << 1)
/*
* Updates with RCU_SIGNAL are much slower. Account this in the delay.
/*
* Updates with RCU_SIGNAL are much slower. Account this in the delay.
#define MAX_SLEEP 50
#endif
#define MAX_SLEEP 50
#endif
-extern unsigned int yield_active;
-extern DECLARE_URCU_TLS(unsigned int, rand_yield);
+extern unsigned int rcu_yield_active;
+extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
- if (yield_active & YIELD_READ)
- if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
- usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+ if (rcu_yield_active & RCU_YIELD_READ)
+ if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
- if (yield_active & YIELD_WRITE)
- if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
- usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
+ if (rcu_yield_active & RCU_YIELD_WRITE)
+ if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
- URCU_TLS(rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
+ URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
-static inline void debug_yield_read(void)
+static inline void rcu_debug_yield_read(void)
-static inline void debug_yield_write(void)
+static inline void rcu_debug_yield_write(void)
-static inline void debug_yield_init(void)
+static inline void rcu_debug_yield_init(void)
#define RCU_MB_GROUP MB_GROUP_ALL
#ifdef RCU_MEMBARRIER
#define RCU_MB_GROUP MB_GROUP_ALL
#ifdef RCU_MEMBARRIER
-extern int has_sys_membarrier;
+extern int rcu_has_sys_membarrier;
static inline void smp_mb_slave(int group)
{
static inline void smp_mb_slave(int group)
{
- if (caa_likely(has_sys_membarrier))
+ if (caa_likely(rcu_has_sys_membarrier))
cmm_barrier();
else
cmm_smp_mb();
cmm_barrier();
else
cmm_smp_mb();
extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
-extern int32_t gp_futex;
+extern int32_t rcu_gp_futex;
/*
* Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
*/
static inline void wake_up_gp(void)
{
/*
* Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
*/
static inline void wake_up_gp(void)
{
- if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
- uatomic_set(&gp_futex, 0);
- futex_async(&gp_futex, FUTEX_WAKE, 1,
+ if (caa_unlikely(uatomic_read(&rcu_gp_futex) == -1)) {
+ uatomic_set(&rcu_gp_futex, 0);
+ futex_async(&rcu_gp_futex, FUTEX_WAKE, 1,