#include <assert.h>
#include <sys/syscall.h>
#include <sched.h>
-#include "../urcu-ht.h"
+#include <urcu-ht.h>
+#include <urcu-defer.h>
+#include <errno.h>
#include "../arch.h"
+#define HASH_SIZE 32
+#define RAND_POOL 1000
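+/*
+ * Keys are drawn from [0, RAND_POOL), so lookups are expected to miss
+ * whenever a key has not been added yet or has since been deleted.
+ */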
+
/* Make this big enough to include the POWER5+ L3 cacheline size of 256B */
#define CACHE_LINE_SIZE 4096
#endif
#include "../urcu.h"
+static unsigned int __thread rand_lookup;
+static unsigned long __thread nr_add;
+static unsigned long __thread nr_addexist;
+static unsigned long __thread nr_del;
+static unsigned long __thread nr_delnoent;
+static unsigned long __thread lookup_fail;
+static unsigned long __thread lookup_ok;
+
static struct rcu_ht *test_ht;
-struct test_array {
+struct test_data {
int a;
+ int b;
};
static volatile int test_go, test_stop;
static unsigned long wdelay;
-static struct test_array *test_rcu_pointer;
-
static unsigned long duration;
/* read-side C.S. duration, in loops */
}
}
-/*
- * malloc/free are reusing memory areas too quickly, which does not let us
- * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
- */
-#define ARRAY_SIZE (1048576 * nr_writers)
-#define ARRAY_POISON 0xDEADBEEF
-static int array_index;
-static struct test_array *test_array;
-
-static struct test_array *test_array_alloc(void)
-{
- struct test_array *ret;
- int index;
-
- rcu_copy_mutex_lock();
- index = array_index % ARRAY_SIZE;
- assert(test_array[index].a == ARRAY_POISON ||
- test_array[index].a == 0);
- ret = &test_array[index];
- array_index++;
- if (array_index == ARRAY_SIZE)
- array_index = 0;
- rcu_copy_mutex_unlock();
- return ret;
-}
-
-static void test_array_free(struct test_array *ptr)
-{
- if (!ptr)
- return;
- rcu_copy_mutex_lock();
- ptr->a = ARRAY_POISON;
- rcu_copy_mutex_unlock();
-}
void *thr_reader(void *_count)
{
unsigned long long *count = _count;
- struct test_array *local_ptr;
+ struct test_data *local_ptr;
printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
for (;;) {
rcu_read_lock();
- local_ptr = rcu_dereference(test_rcu_pointer);
+ local_ptr = ht_lookup(test_ht,
+ (void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL));
+ if (local_ptr == NULL)
+ lookup_fail++;
+ else
+ lookup_ok++;
debug_yield_read();
- if (local_ptr)
- assert(local_ptr->a == 8);
if (unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
*count = nr_reads;
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"reader", pthread_self(), (unsigned long)gettid());
+ printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n",
+ pthread_self(), lookup_fail, lookup_ok);
return ((void*)1);
}
void *thr_writer(void *_count)
{
unsigned long long *count = _count;
- struct test_array *new, *old;
+ struct test_data *data;
+ int ret;
printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
set_affinity();
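+	/*
+	 * The writer is also an RCU reader inside ht_add()/ht_delete(), and
+	 * it frees memory through call_rcu(), hence both registrations.
+	 */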
+ rcu_register_thread();
+ rcu_defer_register_thread();
+
while (!test_go)
{
}
smp_mb();
for (;;) {
- new = test_array_alloc();
- new->a = 8;
- old = rcu_publish_content(&test_rcu_pointer, new);
- if (old)
- old->a = 0;
- test_array_free(old);
+ if (rand_r(&rand_lookup) & 1) {
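+			/*
+			 * The payload is never inspected by this test; only
+			 * the key drives hash table behavior.
+			 */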
+ data = malloc(sizeof(struct test_data));
+ ret = ht_add(test_ht,
+ (void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL),
+ data);
+ if (ret == -EEXIST) {
+ free(data);
+ nr_addexist++;
+ } else {
+ nr_add++;
+ }
+ } else {
+ /* May delete */
+ ret = ht_delete(test_ht,
+ (void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL));
+ if (ret == -ENOENT)
+ nr_delnoent++;
+ else
+ nr_del++;
+ }
nr_writes++;
if (unlikely(!test_duration_write()))
break;
loop_sleep(wdelay);
}
+ rcu_defer_unregister_thread();
+ rcu_unregister_thread();
+
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
"writer", pthread_self(), (unsigned long)gettid());
+ printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, "
+ "nr_delnoent %lu\n", pthread_self(), nr_add,
+ nr_addexist, nr_del, nr_delnoent);
*count = nr_writes;
return ((void*)2);
}
void *tret;
unsigned long long *count_reader, *count_writer;
unsigned long long tot_reads = 0, tot_writes = 0;
- int i, a;
+ int i, a, ret;
if (argc < 4) {
show_usage(argc, argv);
printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
"main", pthread_self(), (unsigned long)gettid());
- test_array = malloc(sizeof(*test_array) * ARRAY_SIZE);
tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
count_reader = malloc(sizeof(*count_reader) * nr_readers);
count_writer = malloc(sizeof(*count_writer) * nr_writers);
- test_ht = ht_new(stupid_hash, free);
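+	/* The last argument is a fixed, arbitrarily chosen hash seed. */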
+ test_ht = ht_new(ht_jhash, free, HASH_SIZE, sizeof(unsigned long),
+ 43223455);
next_aff = 0;
for (i = 0; i < nr_readers; i++) {
exit(1);
tot_writes += count_writer[i];
}
- ht_destroy(test_ht);
+ rcu_register_thread();
+ rcu_defer_register_thread();
+ ret = ht_destroy(test_ht);
+ rcu_defer_unregister_thread();
+ rcu_unregister_thread();
+ printf_verbose("final delete: %d items\n", ret);
printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
tot_writes);
printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu "
argv[0], duration, nr_readers, rduration,
nr_writers, wdelay, tot_reads, tot_writes,
tot_reads + tot_writes);
- test_array_free(test_rcu_pointer);
- free(test_array);
free(tid_reader);
free(tid_writer);
free(count_reader);
+/*
+ * TODO: keys are currently assumed to fit within sizeof(void *). Memory
+ * pointed to by a key is never freed.
+ */
+
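+/*
+ * Minimal usage sketch (illustrative, not part of this patch): keys small
+ * enough to fit in a pointer are passed by value, cast into the key
+ * argument.
+ *
+ *	struct rcu_ht *ht = ht_new(ht_jhash, free, 32,
+ *			sizeof(unsigned long), 42);
+ *	int ret = ht_add(ht, (void *)(unsigned long)7, data);
+ *	void *d = ht_lookup(ht, (void *)(unsigned long)7);
+ *	ret = ht_delete(ht, (void *)(unsigned long)7);
+ */
+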
#define _LGPL_SOURCE
#include <stdlib.h>
#include <urcu.h>
#include <errno.h>
#include <urcu-ht.h>
#include <urcu/jhash.h>
+#include <stdio.h>
struct rcu_ht_node;
struct rcu_ht_node **tbl;
ht_hash_fct hash_fct;
void (*free_fct)(void *data); /* fct to free data */
+	uint32_t keylen;	/* key length, in bytes */
+	uint32_t hashseed;	/* initval fed to the hash function */
struct ht_size {
unsigned long add;
unsigned long lookup;
};
struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
- unsigned long init_size)
+ unsigned long init_size, uint32_t keylen,
+ uint32_t hashseed)
{
struct rcu_ht *ht;
ht->free_fct = free_fct;
ht->size.add = init_size;
ht->size.lookup = init_size;
+ ht->keylen = keylen;
+ ht->hashseed = hashseed;
ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
return ht;
}
struct rcu_ht_node *node;
void *ret;
- hash = ht->hash_fct(key) % ht->size.lookup;
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size.lookup;
rcu_read_lock();
- node = rcu_dereference(*ht->tbl[hash]);
+ node = rcu_dereference(ht->tbl[hash]);
for (;;) {
if (likely(!node)) {
ret = NULL;
new_head = calloc(1, sizeof(struct rcu_ht_node));
new_head->key = key;
new_head->data = data;
- hash = ht->hash_fct(key) % ht->size.add;
-
/* here comes the fun and tricky part.
* Add at the beginning with a cmpxchg.
	 * Hold a read lock between the moment the first element is read
	 * and the cmpxchg below, so the nodes we traverse cannot be freed
	 * by a concurrent steal.
	 */
retry:
rcu_read_lock();
- old_head = node = rcu_dereference(*ht->tbl[hash]);
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size.add;
+
+ old_head = node = rcu_dereference(ht->tbl[hash]);
for (;;) {
if (likely(!node)) {
break;
}
node = rcu_dereference(node->next);
}
- if (rcu_cmpxchg_pointer(ht->tbl[hash], old_head, new_head) != old_head)
+ new_head->next = old_head;
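+	/*
+	 * Publish the new head with a cmpxchg. If the bucket head changed
+	 * under us (concurrent add or steal), drop the read lock and retry.
+	 */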
+ if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
goto restart;
end:
rcu_read_unlock();
unsigned long hash;
void *data;
- hash = ht->hash_fct(key) % ht->size.lookup;
-
retry:
rcu_read_lock();
- prev = ht->tbl[hash];
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size.lookup;
+
+ prev = &ht->tbl[hash];
node = rcu_dereference(*prev);
for (;;) {
if (likely(!node)) {
data = (void *)(unsigned long)-ENOENT;
- goto end;
+ goto error;
}
if (node->key == key) {
break;
/* Found it ! pointer to object is in "prev" */
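+	/*
+	 * Unlink by swinging the predecessor pointer past "node". A failed
+	 * cmpxchg means a concurrent writer raced us on this position.
+	 */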
if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
goto restart;
-end:
+
+	/* From this point on, we own "node". We can free it outside of the read lock. */
rcu_read_unlock();
data = node->data;
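+	/* Concurrent readers may still traverse "node": defer its free. */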
call_rcu(free, node);
+ return data;
+error:
+ rcu_read_unlock();
return data;
/* restart loop, release and re-take the read lock to be kind to GP */
void *data;
data = ht_steal(ht, key);
- if (data) {
- if (ht->free_fct && data)
+ if (data && data != (void *)(unsigned long)-ENOENT) {
+ if (ht->free_fct)
call_rcu(ht->free_fct, data);
return 0;
} else {
}
/* Delete all old elements. Allow concurrent writer accesses. */
-void ht_delete_all(struct rcu_ht *ht)
+int ht_delete_all(struct rcu_ht *ht)
{
unsigned long i;
struct rcu_ht_node **prev, *node, *inext;
+ int cnt = 0;
for (i = 0; i < ht->size.lookup; i++) {
rcu_read_lock();
-cut_head:
- prev = ht->tbl[i];
- if (prev) {
- /*
- * Cut the head. After that, we own the first element.
- */
- node = rcu_xchg_pointer(prev, NULL);
- } else {
+ prev = &ht->tbl[i];
+ /*
+ * Cut the head. After that, we own the first element.
+ */
+ node = rcu_xchg_pointer(prev, NULL);
+ if (!node) {
rcu_read_unlock();
continue;
}
* iteratively.
*/
for (;;) {
		prev = &node->next;
		inext = rcu_xchg_pointer(prev, NULL);
/*
* "node" is the first element of the list we have cut.
		 * We therefore own it; no concurrent writer may delete it.
		 */
-		if (node->data)
+		if (node->data && ht->free_fct)
call_rcu(ht->free_fct, node->data);
call_rcu(free, node);
+ cnt++;
if (likely(!inext))
break;
rcu_read_lock();
node = inext;
}
}
+ return cnt;
}
/*
* Should only be called when no more concurrent readers nor writers can
* possibly access the table.
*/
-void ht_destroy(struct rcu_ht *ht)
+int ht_destroy(struct rcu_ht *ht)
{
- ht_delete_all(ht);
+ int ret;
+
+ ret = ht_delete_all(ht);
free(ht->tbl);
free(ht);
+ return ret;
}
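+
+/*
+ * Teardown sketch mirroring the test program above (illustrative): the
+ * calling thread must be registered with both urcu and urcu-defer,
+ * because ht_delete_all() frees memory through call_rcu().
+ *
+ *	rcu_register_thread();
+ *	rcu_defer_register_thread();
+ *	ret = ht_destroy(ht);
+ *	rcu_defer_unregister_thread();
+ *	rcu_unregister_thread();
+ */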
+/*
+ * Expects keys of size <= pointer size to be encoded in the pointer
+ * value itself.
+ */
uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
- return jhash(key, length, initval);
+ uint32_t ret;
+ void *vkey;
+
+	if (length <= sizeof(void *))
+ vkey = &key;
+ else
+ vkey = key;
+ ret = jhash(vkey, length, initval);
+ return ret;
}