Use the urcu/tls-compat.h wrappers (DECLARE_URCU_TLS, DEFINE_URCU_TLS,
URCU_TLS) instead of raw __thread variables. This provides compatibility
for platforms lacking compiler thread-local storage support, such as
OpenBSD, NetBSD and Darwin.
Suggested-by: Marek Vavruša <marek.vavrusa@nic.cz>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
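
For reviewers unfamiliar with the wrappers: below is a minimal sketch of
how such a compatibility layer can be structured. This is an illustration
only -- HAVE_COMPILER_TLS is a hypothetical configure-time flag, and the
real urcu/tls-compat.h differs in detail (for instance, it avoids taking
a mutex on the fast path). With compiler TLS the macros reduce to plain
__thread variables; without it, each variable becomes an accessor
function wrapping a pthread key.

#include <pthread.h>
#include <stdlib.h>

#ifdef HAVE_COMPILER_TLS	/* hypothetical configure flag */

/* Compiler TLS available: the macros vanish into plain __thread. */
# define DECLARE_URCU_TLS(type, name)	__thread type name
# define DEFINE_URCU_TLS(type, name)	__thread type name
# define URCU_TLS(name)			(name)

#else	/* Fallback for platforms without __thread support. */

/*
 * Each TLS variable becomes an accessor function wrapping a pthread
 * key. Per-thread storage is allocated lazily on first access and
 * freed by the key destructor at thread exit.
 */
# define DECLARE_URCU_TLS(type, name)					\
	type *__tls_access_ ## name(void)

# define DEFINE_URCU_TLS(type, name)					\
	type *__tls_access_ ## name(void)				\
	{								\
		static pthread_mutex_t __tls_mutex =			\
			PTHREAD_MUTEX_INITIALIZER;			\
		static pthread_key_t __tls_key;				\
		static int __tls_init_done;				\
		type *p;						\
									\
		pthread_mutex_lock(&__tls_mutex);			\
		if (!__tls_init_done) {					\
			(void) pthread_key_create(&__tls_key, free);	\
			__tls_init_done = 1;				\
		}							\
		pthread_mutex_unlock(&__tls_mutex);			\
		p = pthread_getspecific(__tls_key);			\
		if (p == NULL) {					\
			p = calloc(1, sizeof(type));			\
			(void) pthread_setspecific(__tls_key, p);	\
		}							\
		return p;						\
	}

/* Every read and write goes through the accessor. */
# define URCU_TLS(name)		(*__tls_access_ ## name())

#endif
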
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_mutex_unlock(&lock);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
- tot_nr_reads[tidx] = nr_reads;
+ tot_nr_reads[tidx] = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
if (caa_unlikely(wduration))
loop_sleep(wduration);
pthread_mutex_unlock(&lock);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_mutex_unlock(&per_thread_lock[tidx].lock);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
- tot_nr_reads[tidx] = nr_reads;
+ tot_nr_reads[tidx] = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
for (tidx = (long)nr_readers - 1; tidx >= 0; tidx--) {
pthread_mutex_unlock(&per_thread_lock[tidx].lock);
}
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_rwlock_unlock(&lock);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
if (caa_unlikely(wduration))
loop_sleep(wduration);
pthread_rwlock_unlock(&lock);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_register_thread();
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
defer_rcu(test_cb2, (void *)-2L);
defer_rcu(test_cb2, (void *)-4L);
defer_rcu(test_cb2, (void *)-2L);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
if (caa_unlikely(wduration))
loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
return test_hash_cb[test_choice].populate_hash;
}
-unsigned int __thread rand_lookup;
-unsigned long __thread nr_add;
-unsigned long __thread nr_addexist;
-unsigned long __thread nr_del;
-unsigned long __thread nr_delnoent;
-unsigned long __thread lookup_fail;
-unsigned long __thread lookup_ok;
+DEFINE_URCU_TLS(unsigned int, rand_lookup);
+DEFINE_URCU_TLS(unsigned long, nr_add);
+DEFINE_URCU_TLS(unsigned long, nr_addexist);
+DEFINE_URCU_TLS(unsigned long, nr_del);
+DEFINE_URCU_TLS(unsigned long, nr_delnoent);
+DEFINE_URCU_TLS(unsigned long, lookup_fail);
+DEFINE_URCU_TLS(unsigned long, lookup_ok);
struct cds_lfht *test_ht;
pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
-unsigned long long __thread nr_writes;
-unsigned long long __thread nr_reads;
+DEFINE_URCU_TLS(unsigned long long, nr_writes);
+DEFINE_URCU_TLS(unsigned long long, nr_reads);
unsigned int nr_readers;
unsigned int nr_writers;
#include <errno.h>
#include <signal.h>
+#include <urcu/tls-compat.h>
+
#ifdef __linux__
#include <syscall.h>
#endif
unsigned long remove;
};
-extern unsigned int __thread rand_lookup;
-extern unsigned long __thread nr_add;
-extern unsigned long __thread nr_addexist;
-extern unsigned long __thread nr_del;
-extern unsigned long __thread nr_delnoent;
-extern unsigned long __thread lookup_fail;
-extern unsigned long __thread lookup_ok;
+extern DECLARE_URCU_TLS(unsigned int, rand_lookup);
+extern DECLARE_URCU_TLS(unsigned long, nr_add);
+extern DECLARE_URCU_TLS(unsigned long, nr_addexist);
+extern DECLARE_URCU_TLS(unsigned long, nr_del);
+extern DECLARE_URCU_TLS(unsigned long, nr_delnoent);
+extern DECLARE_URCU_TLS(unsigned long, lookup_fail);
+extern DECLARE_URCU_TLS(unsigned long, lookup_ok);
extern struct cds_lfht *test_ht;
return !test_stop;
}
-extern unsigned long long __thread nr_writes;
-extern unsigned long long __thread nr_reads;
+extern DECLARE_URCU_TLS(unsigned long long, nr_writes);
+extern DECLARE_URCU_TLS(unsigned long long, nr_reads);
extern unsigned int nr_readers;
extern unsigned int nr_writers;
for (;;) {
rcu_read_lock();
cds_lfht_test_lookup(test_ht,
- (void *)(((unsigned long) rand_r(&rand_lookup) % lookup_pool_size) + lookup_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % lookup_pool_size) + lookup_pool_offset),
sizeof(void *), &iter);
node = cds_lfht_iter_get_test_node(&iter);
if (node == NULL) {
printf("[ERROR] Lookup cannot find initial node.\n");
exit(-1);
}
- lookup_fail++;
+ URCU_TLS(lookup_fail)++;
} else {
- lookup_ok++;
+ URCU_TLS(lookup_ok)++;
}
debug_yield_read();
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n",
- pthread_self(), lookup_fail, lookup_ok);
+ pthread_self(), URCU_TLS(lookup_fail),
+ URCU_TLS(lookup_ok));
return ((void*)1);
}
for (;;) {
if ((addremove == AR_ADD || add_only)
- || (addremove == AR_RANDOM && rand_r(&rand_lookup) & 1)) {
+ || (addremove == AR_RANDOM && rand_r(&URCU_TLS(rand_lookup)) & 1)) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *));
rcu_read_lock();
if (add_unique) {
rcu_read_unlock();
if (add_unique && ret_node != &node->node) {
free(node);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
if (add_replace && ret_node) {
call_rcu(&to_test_node(ret_node)->head,
free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
}
} else {
/* May delete */
rcu_read_lock();
cds_lfht_test_lookup(test_ht,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *), &iter);
ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
rcu_read_unlock();
if (ret == 0) {
node = cds_lfht_iter_get_test_node(&iter);
call_rcu(&node->head, free_node_cb);
- nr_del++;
+ URCU_TLS(nr_del)++;
} else
- nr_delnoent++;
+ URCU_TLS(nr_delnoent)++;
}
#if 0
- //if (nr_writes % 100000 == 0) {
- if (nr_writes % 1000 == 0) {
+ //if (URCU_TLS(nr_writes) % 100000 == 0) {
+ if (URCU_TLS(nr_writes) % 1000 == 0) {
rcu_read_lock();
- if (rand_r(&rand_lookup) & 1) {
+ if (rand_r(&URCU_TLS(rand_lookup)) & 1) {
ht_resize(test_ht, 1);
} else {
ht_resize(test_ht, -1);
rcu_read_unlock();
}
#endif //0
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
- if (caa_unlikely((nr_writes & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_writes) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, "
- "nr_delnoent %lu\n", pthread_self(), nr_add,
- nr_addexist, nr_del, nr_delnoent);
- count->update_ops = nr_writes;
- count->add = nr_add;
- count->add_exist = nr_addexist;
- count->remove = nr_del;
+ "nr_delnoent %lu\n", pthread_self(), URCU_TLS(nr_add),
+ URCU_TLS(nr_addexist), URCU_TLS(nr_del),
+ URCU_TLS(nr_delnoent));
+ count->update_ops = URCU_TLS(nr_writes);
+ count->add = URCU_TLS(nr_add);
+ count->add_exist = URCU_TLS(nr_addexist);
+ count->remove = URCU_TLS(nr_del);
return ((void*)2);
}
"larger random pool (-p option). This may take a while...\n", init_populate, init_pool_size);
}
- while (nr_add < init_populate) {
+ while (URCU_TLS(nr_add) < init_populate) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % init_pool_size) + init_pool_offset),
sizeof(void *));
rcu_read_lock();
if (add_unique) {
rcu_read_unlock();
if (add_unique && ret_node != &node->node) {
free(node);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
if (add_replace && ret_node) {
call_rcu(&to_test_node(ret_node)->head, free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
}
- nr_writes++;
+ URCU_TLS(nr_writes)++;
}
return 0;
}
debug_yield_read();
if (caa_unlikely(rduration))
loop_sleep(rduration);
- nr_reads++;
+ URCU_TLS(nr_reads)++;
if (caa_unlikely(!test_duration_read()))
break;
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n",
- pthread_self(), lookup_fail, lookup_ok);
+ pthread_self(), URCU_TLS(lookup_fail),
+ URCU_TLS(lookup_ok));
return ((void*)1);
}
* add unique/add replace with new node key from range.
*/
if (1 || (addremove == AR_ADD || add_only)
- || (addremove == AR_RANDOM && rand_r(&rand_lookup) & 1)) {
+ || (addremove == AR_RANDOM && rand_r(&URCU_TLS(rand_lookup)) & 1)) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *));
rcu_read_lock();
- loc_add_unique = rand_r(&rand_lookup) & 1;
+ loc_add_unique = rand_r(&URCU_TLS(rand_lookup)) & 1;
if (loc_add_unique) {
ret_node = cds_lfht_add_unique(test_ht,
test_hash(node->key, node->key_len, TEST_HASH_SEED),
if (loc_add_unique) {
if (ret_node != &node->node) {
free(node);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
} else {
if (ret_node) {
call_rcu(&to_test_node(ret_node)->head,
free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
}
} else {
/* May delete */
rcu_read_lock();
cds_lfht_test_lookup(test_ht,
- (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
sizeof(void *), &iter);
ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
rcu_read_unlock();
if (ret == 0) {
node = cds_lfht_iter_get_test_node(&iter);
call_rcu(&node->head, free_node_cb);
- nr_del++;
+ URCU_TLS(nr_del)++;
} else
- nr_delnoent++;
+ URCU_TLS(nr_delnoent)++;
}
#if 0
- //if (nr_writes % 100000 == 0) {
- if (nr_writes % 1000 == 0) {
+ //if (URCU_TLS(nr_writes) % 100000 == 0) {
+ if (URCU_TLS(nr_writes) % 1000 == 0) {
rcu_read_lock();
- if (rand_r(&rand_lookup) & 1) {
+ if (rand_r(&URCU_TLS(rand_lookup)) & 1) {
ht_resize(test_ht, 1);
} else {
ht_resize(test_ht, -1);
rcu_read_unlock();
}
#endif //0
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
- if (caa_unlikely((nr_writes & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_writes) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
}
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, "
- "nr_delnoent %lu\n", pthread_self(), nr_add,
- nr_addexist, nr_del, nr_delnoent);
- count->update_ops = nr_writes;
- count->add = nr_add;
- count->add_exist = nr_addexist;
- count->remove = nr_del;
+ "nr_delnoent %lu\n", pthread_self(), URCU_TLS(nr_add),
+ URCU_TLS(nr_addexist), URCU_TLS(nr_del),
+ URCU_TLS(nr_delnoent));
+ count->update_ops = URCU_TLS(nr_writes);
+ count->add = URCU_TLS(nr_add);
+ count->add_exist = URCU_TLS(nr_addexist);
+ count->remove = URCU_TLS(nr_del);
return ((void*)2);
}
"larger random pool (-p option). This may take a while...\n", init_populate, init_pool_size);
}
- while (nr_add < init_populate) {
+ while (URCU_TLS(nr_add) < init_populate) {
node = malloc(sizeof(struct lfht_test_node));
lfht_test_node_init(node,
- (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset),
+ (void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % init_pool_size) + init_pool_offset),
sizeof(void *));
rcu_read_lock();
ret_node = cds_lfht_add_replace(test_ht,
rcu_read_unlock();
if (ret_node) {
call_rcu(&to_test_node(ret_node)->head, free_node_cb);
- nr_addexist++;
+ URCU_TLS(nr_addexist)++;
} else {
- nr_add++;
+ URCU_TLS(nr_add)++;
}
- nr_writes++;
+ URCU_TLS(nr_writes)++;
}
return 0;
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
rcu_read_lock();
cds_lfq_enqueue_rcu(&q, &node->list);
rcu_read_unlock();
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
rcu_unregister_thread();
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
if (node) {
call_rcu(&node->rcu, free_node_cb);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
rcu_defer_unregister_thread();
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
cds_lfs_node_init_rcu(&node->list);
/* No rcu read-side is needed for push */
cds_lfs_push_rcu(&s, &node->list);
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
rcu_unregister_thread();
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
rcu_read_unlock();
if (node) {
call_rcu(&node->rcu, free_node_cb);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
/* QS each 1024 reads */
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
rcu_quiescent_state();
if (caa_unlikely(!test_duration_read()))
break;
rcu_register_thread();
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
old->a = 0;
test_array_free(old);
rcu_copy_mutex_unlock();
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- *count = nr_writes;
+ *count = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static unsigned int nr_readers;
static unsigned int nr_writers;
if (caa_unlikely(rduration))
loop_sleep(rduration);
_rcu_read_unlock();
- nr_reads++;
+ URCU_TLS(nr_reads)++;
/* QS each 1024 reads */
- if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((URCU_TLS(nr_reads) & ((1 << 10) - 1)) == 0))
_rcu_quiescent_state();
if (caa_unlikely(!test_duration_read()))
break;
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
return ((void*)1);
if (caa_unlikely(wduration))
loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
- nr_writes++;
+ URCU_TLS(nr_writes)++;
if (caa_unlikely(!test_duration_write()))
break;
if (caa_unlikely(wdelay))
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
goto fail;
cds_wfq_node_init(node);
cds_wfq_enqueue(&q, node);
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
if (node) {
free(node);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
#ifdef __linux__
#include <syscall.h>
return !test_stop;
}
-static unsigned long long __thread nr_dequeues;
-static unsigned long long __thread nr_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_enqueues);
-static unsigned long long __thread nr_successful_dequeues;
-static unsigned long long __thread nr_successful_enqueues;
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_dequeues);
+static DEFINE_URCU_TLS(unsigned long long, nr_successful_enqueues);
static unsigned int nr_enqueuers;
static unsigned int nr_dequeuers;
goto fail;
cds_wfs_node_init(node);
cds_wfs_push(&s, node);
- nr_successful_enqueues++;
+ URCU_TLS(nr_successful_enqueues)++;
if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
- nr_enqueues++;
+ URCU_TLS(nr_enqueues)++;
if (caa_unlikely(!test_duration_enqueue()))
break;
}
- count[0] = nr_enqueues;
- count[1] = nr_successful_enqueues;
+ count[0] = URCU_TLS(nr_enqueues);
+ count[1] = URCU_TLS(nr_successful_enqueues);
printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
"enqueues %llu successful_enqueues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_enqueues,
- nr_successful_enqueues);
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
return ((void*)1);
}
if (node) {
free(node);
- nr_successful_dequeues++;
+ URCU_TLS(nr_successful_dequeues)++;
}
- nr_dequeues++;
+ URCU_TLS(nr_dequeues)++;
if (caa_unlikely(!test_duration_dequeue()))
break;
if (caa_unlikely(rduration))
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
- pthread_self(), (unsigned long)gettid(), nr_dequeues,
- nr_successful_dequeues);
- count[0] = nr_dequeues;
- count[1] = nr_successful_dequeues;
+ pthread_self(), (unsigned long)gettid(),
+ URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
+ count[0] = URCU_TLS(nr_dequeues);
+ count[1] = URCU_TLS(nr_successful_dequeues);
return ((void*)2);
}
#include "urcu/map/urcu-bp.h"
#include "urcu/static/urcu-bp.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+DEFINE_URCU_TLS(unsigned int, rand_yield);
#endif
/*
* Pointer to registry elements. Written to only by each individual reader. Read
* by both the reader and the writers.
*/
-struct rcu_reader __thread *rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);
static CDS_LIST_HEAD(registry);
rcu_reader_reg->tid = pthread_self();
assert(rcu_reader_reg->ctr == 0);
cds_list_add(&rcu_reader_reg->node, &registry);
- rcu_reader = rcu_reader_reg;
+ URCU_TLS(rcu_reader) = rcu_reader_reg;
}
/* Called with signals off and mutex locked */
/*
* Check if a signal concurrently registered our thread since
* the check in rcu_read_lock(). */
- if (rcu_reader)
+ if (URCU_TLS(rcu_reader))
goto end;
mutex_lock(&rcu_gp_lock);
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
+#include "urcu/tls-compat.h"
/* Data structure that identifies a call_rcu thread. */
/* Link a thread using call_rcu() to its call_rcu thread. */
-static __thread struct call_rcu_data *thread_call_rcu_data;
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/* Guard call_rcu thread creation. */
*/
rcu_register_thread();
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
{
struct call_rcu_data *crd;
- if (thread_call_rcu_data != NULL)
- return thread_call_rcu_data;
+ if (URCU_TLS(thread_call_rcu_data) != NULL)
+ return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
crd = get_cpu_call_rcu_data(sched_getcpu());
struct call_rcu_data *get_thread_call_rcu_data(void)
{
- return thread_call_rcu_data;
+ return URCU_TLS(thread_call_rcu_data);
}
/*
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
}
/*
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
- thread_call_rcu_data = NULL;
+ URCU_TLS(thread_call_rcu_data) = NULL;
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
+#include <urcu/tls-compat.h>
/*
* Number of entries in the per-thread defer queue. Must be power of 2.
* Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
*/
-static struct defer_queue __thread defer_queue;
+static DEFINE_URCU_TLS(struct defer_queue, defer_queue);
static CDS_LIST_HEAD(registry_defer);
static pthread_t tid_defer;
{
unsigned long head, num_items;
- head = defer_queue.head;
- num_items = head - defer_queue.tail;
+ head = URCU_TLS(defer_queue).head;
+ num_items = head - URCU_TLS(defer_queue).tail;
if (caa_unlikely(!num_items))
return;
synchronize_rcu();
- rcu_defer_barrier_queue(&defer_queue, head);
+ rcu_defer_barrier_queue(&URCU_TLS(defer_queue), head);
}
void rcu_defer_barrier_thread(void)
 * Head is only modified by the current thread. Tail can be modified by the
 * reclamation thread.
*/
- head = defer_queue.head;
- tail = CMM_LOAD_SHARED(defer_queue.tail);
+ head = URCU_TLS(defer_queue).head;
+ tail = CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail);
/*
* If queue is full, or reached threshold. Empty queue ourself.
if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
- assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
+ assert(head - CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail) == 0);
}
/*
* Decode: see the comments before 'struct defer_queue'
* or the code in rcu_defer_barrier_queue().
*/
- if (caa_unlikely(defer_queue.last_fct_in != fct
+ if (caa_unlikely(URCU_TLS(defer_queue).last_fct_in != fct
|| DQ_IS_FCT_BIT(p)
|| p == DQ_FCT_MARK)) {
- defer_queue.last_fct_in = fct;
+ URCU_TLS(defer_queue).last_fct_in = fct;
if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
fct);
} else {
DQ_SET_FCT_BIT(fct);
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
fct);
}
}
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
+ _CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK], p);
cmm_smp_wmb(); /* Publish new pointer before head */
/* Write q[] before head. */
- CMM_STORE_SHARED(defer_queue.head, head);
+ CMM_STORE_SHARED(URCU_TLS(defer_queue).head, head);
cmm_smp_mb(); /* Write queue head before read futex */
/*
* Wake-up any waiting defer thread.
{
int was_empty;
- assert(defer_queue.last_head == 0);
- assert(defer_queue.q == NULL);
- defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
- if (!defer_queue.q)
+ assert(URCU_TLS(defer_queue).last_head == 0);
+ assert(URCU_TLS(defer_queue).q == NULL);
+ URCU_TLS(defer_queue).q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
+ if (!URCU_TLS(defer_queue).q)
return -ENOMEM;
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
was_empty = cds_list_empty(&registry_defer);
- cds_list_add(&defer_queue.list, &registry_defer);
+ cds_list_add(&URCU_TLS(defer_queue).list, &registry_defer);
mutex_unlock(&rcu_defer_mutex);
if (was_empty)
mutex_lock_defer(&defer_thread_mutex);
mutex_lock_defer(&rcu_defer_mutex);
- cds_list_del(&defer_queue.list);
+ cds_list_del(&URCU_TLS(defer_queue).list);
_rcu_defer_barrier_thread();
- free(defer_queue.q);
- defer_queue.q = NULL;
+ free(URCU_TLS(defer_queue).q);
+ URCU_TLS(defer_queue).q = NULL;
is_empty = cds_list_empty(&registry_defer);
mutex_unlock(&rcu_defer_mutex);
#define BUILD_QSBR_LIB
#include "urcu/static/urcu-qsbr.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct rcu_reader __thread rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+DEFINE_URCU_TLS(unsigned int, rand_yield);
#endif
static CDS_LIST_HEAD(registry);
* quiescent state. Failure to do so could result in the writer
* waiting forever while new readers are always accessing data
* (no progress). Enforce compiler-order of store to rcu_gp_ctr
- * before load rcu_reader ctr.
+ * before load URCU_TLS(rcu_reader).ctr.
*/
cmm_barrier();
{
unsigned long was_online;
- was_online = rcu_reader.ctr;
+ was_online = URCU_TLS(rcu_reader).ctr;
/* All threads should read qparity before accessing data structure
* where new ptr points to. In the "then" case, rcu_thread_offline
* committing next rcu_gp_ctr update to memory. Failure to
* do so could result in the writer waiting forever while new
* readers are always accessing data (no progress). Enforce
- * compiler-order of load rcu_reader ctr before store to
+ * compiler-order of load URCU_TLS(rcu_reader).ctr before store to
* rcu_gp_ctr.
*/
cmm_barrier();
{
unsigned long was_online;
- was_online = rcu_reader.ctr;
+ was_online = URCU_TLS(rcu_reader).ctr;
/*
* Mark the writer thread offline to make sure we don't wait for
void rcu_register_thread(void)
{
- rcu_reader.tid = pthread_self();
- assert(rcu_reader.ctr == 0);
+ URCU_TLS(rcu_reader).tid = pthread_self();
+ assert(URCU_TLS(rcu_reader).ctr == 0);
mutex_lock(&rcu_gp_lock);
- cds_list_add(&rcu_reader.node, &registry);
+ cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
mutex_unlock(&rcu_gp_lock);
_rcu_thread_online();
}
*/
_rcu_thread_offline();
mutex_lock(&rcu_gp_lock);
- cds_list_del(&rcu_reader.node);
+ cds_list_del(&URCU_TLS(rcu_reader).node);
mutex_unlock(&rcu_gp_lock);
}
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
+#include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-struct rcu_reader __thread rcu_reader;
+DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
#ifdef DEBUG_YIELD
unsigned int yield_active;
-unsigned int __thread rand_yield;
+DEFINE_URCU_TLS(unsigned int, rand_yield);
#endif
static CDS_LIST_HEAD(registry);
perror("Error in pthread mutex lock");
exit(-1);
}
- if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
+ if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
cmm_smp_mb();
}
poll(NULL,0,10);
cmm_smp_mb();
/*
- * Wait for each thread rcu_reader.ctr count to become 0.
+ * Wait for each thread URCU_TLS(rcu_reader).ctr count to become 0.
*/
for (;;) {
wait_loops++;
#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
* BUSY-LOOP. Force the reader thread to commit its
- * rcu_reader.ctr update to memory if we wait for too long.
+ * URCU_TLS(rcu_reader).ctr update to memory if we wait
+ * for too long.
*/
if (cds_list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
* committing next rcu_gp_ctr update to memory. Failure to do so could
* result in the writer waiting forever while new readers are always
* accessing data (no progress). Enforce compiler-order of load
- * rcu_reader ctr before store to rcu_gp_ctr.
+ * URCU_TLS(rcu_reader).ctr before store to rcu_gp_ctr.
*/
cmm_barrier();
void rcu_register_thread(void)
{
- rcu_reader.tid = pthread_self();
- assert(rcu_reader.need_mb == 0);
- assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));
+ URCU_TLS(rcu_reader).tid = pthread_self();
+ assert(URCU_TLS(rcu_reader).need_mb == 0);
+ assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));
mutex_lock(&rcu_gp_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
- cds_list_add(&rcu_reader.node, &registry);
+ cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
mutex_lock(&rcu_gp_lock);
- cds_list_del(&rcu_reader.node);
+ cds_list_del(&URCU_TLS(rcu_reader).node);
mutex_unlock(&rcu_gp_lock);
}
* executed on.
*/
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.need_mb, 0);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
cmm_smp_mb();
}
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
+#include <urcu/tls-compat.h>
/*
* This code section can only be included in LGPL 2.1 compatible source code.
#define MAX_SLEEP 50
extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
static inline void debug_yield_read(void)
{
if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
- rand_yield = time(NULL) ^ pthread_self();
+ URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
* Adds a pointer dereference on the read-side, but won't require to unregister
* the reader thread.
*/
-extern struct rcu_reader __thread *rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
static inline int rcu_old_gp_ongoing(long *value)
{
long tmp;
/* Check if registered */
- if (caa_unlikely(!rcu_reader))
+ if (caa_unlikely(!URCU_TLS(rcu_reader)))
rcu_bp_register();
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = rcu_reader->ctr;
+ tmp = URCU_TLS(rcu_reader)->ctr;
/*
* rcu_gp_ctr is
* RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
*/
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer.
*/
cmm_smp_mb();
} else {
- _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
}
}
* Finish using rcu before decrementing the pointer.
*/
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, URCU_TLS(rcu_reader)->ctr - RCU_GP_COUNT);
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
#ifdef __cplusplus
extern "C" {
#define MAX_SLEEP 50
extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
static inline void debug_yield_read(void)
{
if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
- rand_yield = time(NULL) ^ pthread_self();
+ URCU_TLS(rand_yield) = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
pthread_t tid;
};
-extern struct rcu_reader __thread rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern int32_t gp_futex;
*/
static inline void wake_up_gp(void)
{
- if (caa_unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
- _CMM_STORE_SHARED(rcu_reader.waiting, 0);
+ if (caa_unlikely(_CMM_LOAD_SHARED(URCU_TLS(rcu_reader).waiting))) {
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).waiting, 0);
cmm_smp_mb();
if (uatomic_read(&gp_futex) != -1)
return;
static inline void _rcu_read_lock(void)
{
- rcu_assert(rcu_reader.ctr);
+ rcu_assert(URCU_TLS(rcu_reader).ctr);
}
static inline void _rcu_read_unlock(void)
static inline void _rcu_quiescent_state(void)
{
cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb(); /* write URCU_TLS(rcu_reader).ctr before read futex */
wake_up_gp();
cmm_smp_mb();
}
static inline void _rcu_thread_offline(void)
{
cmm_smp_mb();
- CMM_STORE_SHARED(rcu_reader.ctr, 0);
- cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
+ CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, 0);
+ cmm_smp_mb(); /* write URCU_TLS(rcu_reader).ctr before read futex */
wake_up_gp();
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
static inline void _rcu_thread_online(void)
{
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
cmm_smp_mb();
}
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
#ifdef __cplusplus
extern "C" {
#endif
extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
+extern DECLARE_URCU_TLS(unsigned int, rand_yield);
static inline void debug_yield_read(void)
{
if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
+ if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
+ usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
- rand_yield = time(NULL) ^ (unsigned long) pthread_self();
+ URCU_TLS(rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
}
#else
static inline void debug_yield_read(void)
pthread_t tid;
};
-extern struct rcu_reader __thread rcu_reader;
+extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern int32_t gp_futex;
unsigned long tmp;
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = rcu_reader.ctr;
+ tmp = URCU_TLS(rcu_reader).ctr;
/*
* rcu_gp_ctr is
* RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
*/
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer. See smp_mb_master().
*/
smp_mb_slave(RCU_MB_GROUP);
} else {
- _CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
}
}
{
unsigned long tmp;
- tmp = rcu_reader.ctr;
+ tmp = URCU_TLS(rcu_reader).ctr;
/*
* Finish using rcu before decrementing the pointer.
* See smp_mb_master().
*/
if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
smp_mb_slave(RCU_MB_GROUP);
- _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
- /* write rcu_reader.ctr before read futex */
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
+ /* write URCU_TLS(rcu_reader).ctr before read futex */
smp_mb_slave(RCU_MB_GROUP);
wake_up_gp();
} else {
- _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
}
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
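
Usage then follows the pattern seen throughout the patch above. A minimal
self-contained sketch (the nr_ops counter and worker function are
hypothetical, not part of this patch), assuming urcu/tls-compat.h is on
the include path:

#include <stdio.h>
#include <pthread.h>
#include <urcu/tls-compat.h>

/* One definition per TLS variable, at file scope. */
static DEFINE_URCU_TLS(unsigned long long, nr_ops);

static void *worker(void *arg)
{
	int i;

	(void) arg;
	/* Each thread increments its own instance through URCU_TLS(). */
	for (i = 0; i < 1000; i++)
		URCU_TLS(nr_ops)++;
	printf("thread %lx: %llu ops\n",
		(unsigned long) pthread_self(), URCU_TLS(nr_ops));
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}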