for (;;) {
if (rand_r(&rand_lookup) & 1) {
data = malloc(sizeof(struct test_data));
+ //rcu_copy_mutex_lock();
ret = ht_add(test_ht,
(void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL),
data);
} else {
nr_add++;
}
+ //rcu_copy_mutex_unlock();
} else {
/* May delete */
+ //rcu_copy_mutex_lock();
ret = ht_delete(test_ht,
(void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL));
if (ret == -ENOENT)
nr_delnoent++;
else
nr_del++;
+ //rcu_copy_mutex_unlock();
+ }
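+ /* Periodically trigger a random grow or shrink to exercise concurrent resize. */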
+ //if (nr_writes % 100000 == 0) {
+ if (nr_writes % 1000 == 0) {
+ if (rand_r(&rand_lookup) & 1) {
+ ht_resize(test_ht, 1);
+ } else {
+ ht_resize(test_ht, -1);
+ }
}
nr_writes++;
if (unlikely(!test_duration_write()))
#include <pthread.h>
#include <urcu-ht.h>
+/*
+ * Maximum hash table bucket memory: 256MB on 64-bit (32M buckets).
+ * Should take about 512MB max if we assume 1 node per 4 buckets.
+ */
+#define MAX_HT_BUCKETS ((256 << 20) / sizeof(void *))
+
/* node flags */
#define NODE_STOLEN (1 << 0)
ht = calloc(1, sizeof(struct rcu_ht));
ht->hash_fct = hash_fct;
ht->free_fct = free_fct;
- ht->size = init_size;
+ ht->size = init_size; /* shared */
ht->keylen = keylen;
ht->hashseed = hashseed;
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- ht->resize_ongoing = 0;
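+ /*
+ * Set under resize_mutex by ht_resize(), polled locklessly by the
+ * lookup/add/delete fast paths, hence the LOAD_SHARED/STORE_SHARED accesses.
+ */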
+ ht->resize_ongoing = 0; /* shared */
ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
return ht;
}
retry:
rcu_read_lock();
- if (unlikely(ht->resize_ongoing)) {
+ if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
rcu_read_unlock();
/*
* Wait for resize to complete before continuing.
goto retry;
}
- hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
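+ /*
+ * Read the bucket count once with LOAD_SHARED: ht->size only changes
+ * while resize_ongoing is set, which was checked above.
+ */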
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+ % LOAD_SHARED(ht->size);
old_head = node = rcu_dereference(ht->tbl[hash]);
for (;;) {
retry:
rcu_read_lock();
- if (unlikely(ht->resize_ongoing)) {
+ if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
rcu_read_unlock();
/*
* Wait for resize to complete before continuing.
goto retry;
}
- hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+ % LOAD_SHARED(ht->size);
prev = &ht->tbl[hash];
node = rcu_dereference(*prev);
old_size = ht->size;
- if (old_size == 1)
+ if (old_size == MAX_HT_BUCKETS)
return;
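+ /* Snapshot the old table; the copy loop below walks it while the new table is filled. */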
+ old_tbl = ht->tbl;
new_size = old_size << 1;
new_tbl = calloc(new_size, sizeof(struct rcu_ht_node *));
* if it's in the table.
* Copy each node. (just the node, not ->data)
*/
- node = ht->tbl[i];
+ node = old_tbl[i];
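+ /*
+ * Re-hash each node modulo new_size: it may land in a different
+ * bucket index than it had in the old table.
+ */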
while (node) {
hash = ht->hash_fct(node->key, ht->keylen, ht->hashseed)
% new_size;
new_node = malloc(sizeof(struct rcu_ht_node));
new_node->key = node->key;
new_node->data = node->data;
- new_node->next = new_tbl[i]; /* add to head */
- new_tbl[i] = new_node;
+ new_node->next = new_tbl[hash]; /* add to head */
+ new_tbl[hash] = new_node;
node = node->next;
}
}
- old_tbl = ht->tbl;
ht->tbl = new_tbl;
smp_wmb(); /* write links and table before changing size */
- ht->size = new_size;
+ STORE_SHARED(ht->size, new_size);
/* Ensure all concurrent lookups use new size and table */
synchronize_rcu();
new_size = ht->size >> 1;
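+ /*
+ * Halving the size makes buckets i and i + new_size collide under
+ * hash % new_size, so append the upper chain to the lower one.
+ */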
for (i = 0; i < new_size; i++) {
- /* Link end with first entry of 2*i */
+ /* Link end with first entry of i + new_size */
prev = &ht->tbl[i];
node = *prev;
while (node) {
prev = &node->next;
node = *prev;
}
- *prev = ht->tbl[i << 1];
+ *prev = ht->tbl[i + new_size];
}
smp_wmb(); /* write links before changing size */
- ht->size = new_size;
+ STORE_SHARED(ht->size, new_size);
/* Ensure all concurrent lookups use new size */
synchronize_rcu();
ret = pthread_mutex_lock(&ht->resize_mutex);
assert(!ret);
- ht->resize_ongoing = 1;
+ STORE_SHARED(ht->resize_ongoing, 1);
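+ /*
+ * Wait for a grace period so any reader that sampled resize_ongoing == 0
+ * has left its read-side C.S. before the table is modified.
+ */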
synchronize_rcu();
/* All add/remove are waiting on the mutex. */
if (growth > 0)
else if (growth < 0)
ht_resize_shrink(ht);
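+ /* Make the resized table and size visible before clearing the flag. */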
smp_mb();
- ht->resize_ongoing = 0;
+ STORE_SHARED(ht->resize_ongoing, 0);
ret = pthread_mutex_unlock(&ht->resize_mutex);
assert(!ret);
}