mutex in its dependency chain) should not be acquired from within an RCU
read-side critical section.
+	This is especially important to understand in the context of the
+	QSBR flavor: a registered reader thread being "online" by
+	default should be considered as being within an RCU read-side
+	critical section unless explicitly put "offline". Therefore, if
+	synchronize_rcu() is called with a mutex held, this mutex, as
+	well as any mutex which has this mutex in its dependency chain,
+	should only be taken when the RCU reader thread is "offline"
+	(this can be performed by calling rcu_thread_offline()).
+
Usage of DEBUG_RCU
DEBUG_RCU is used to add internal debugging self-checks to the
#define CHAIN_LEN_RESIZE_THRESHOLD 3
/*
- * Define the minimum table size. Protects against hash table resize overload
- * when too many entries are added quickly before the resize can complete.
- * This is especially the case if the table could be shrinked to a size of 1.
- * TODO: we might want to make the add/remove operations help the resize to
- * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
- * chain length.
+ * Define the minimum table size.
*/
-#define MIN_TABLE_SIZE 128
+#define MIN_TABLE_SIZE 1
#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER 32
#define DUMMY_FLAG (1UL << 1)
#define FLAGS_MASK ((1UL << 2) - 1)
+/* Value of the end pointer. Must not collide with the low-order flag bits. */
+#define END_VALUE NULL
+
struct ht_items_count {
unsigned long add, remove;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct cds_lfht *ht;
};
+/*
+ * Forward declaration of the internal add routine; callers pass the
+ * current table size plus the unique/dummy mode flags (see call sites
+ * below, e.g. cds_lfht_add_unique).
+ */
+static
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+		unsigned long size,
+		struct cds_lfht_node *node,
+		int unique, int dummy);
+
/*
* Algorithm to reverse bits in a word by lookup table, extended to
* 64-bit words.
{
return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
-
+
+/* Return the sentinel marking the end of a bucket chain (END_VALUE). */
+static
+struct cds_lfht_node *get_end(void)
+{
+	return (struct cds_lfht_node *) END_VALUE;
+}
+
+/*
+ * Test whether @node is the end-of-chain sentinel, ignoring any
+ * low-order flag bits set on the pointer.
+ */
+static
+int is_end(struct cds_lfht_node *node)
+{
+	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
+}
+
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
*/
assert(dummy != node);
for (;;) {
- if (unlikely(!clear_flag(iter)))
+ if (unlikely(is_end(iter)))
return;
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
return;
new_next = clear_flag(next);
(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
}
+ return;
}
static
assert(!is_removed(node));
if (!size) {
assert(dummy);
- node->p.next = flag_dummy(NULL);
+ node->p.next = flag_dummy(get_end());
return node; /* Initial first add (head) */
}
hash = bit_reverse_ulong(node->p.reverse_hash);
iter = rcu_dereference(iter_prev->p.next);
assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
for (;;) {
- if (unlikely(!clear_flag(iter)))
+ if (unlikely(is_end(iter)))
goto insert;
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
goto insert;
new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
+ assert(new_next != NULL);
(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
/* retry */
}
assert(is_dummy(next));
else
assert(!is_dummy(next));
+ assert(next != NULL);
old = uatomic_cmpxchg(&node->p.next, next,
flag_removed(next));
} while (old != next);
init_table_link(ht, i, len);
/*
- * Update table size (after init for now, because no
- * concurrent updater help (TODO)).
+ * Update table size.
*/
cmm_smp_wmb(); /* populate data before RCU size */
CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+
dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
- struct cds_lfht_node *node, *next;
+ struct cds_lfht_node *node, *next, *dummy_node;
struct _cds_lfht_node *lookup;
unsigned long hash, reverse_hash, index, order, size;
lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
- node = (struct cds_lfht_node *) lookup;
+ dummy_node = (struct cds_lfht_node *) lookup;
+ /* We can always skip the dummy node initially */
+ node = rcu_dereference(dummy_node->p.next);
+ node = clear_flag(node);
for (;;) {
- if (unlikely(!node))
+ if (unlikely(is_end(node))) {
+ node = NULL;
break;
+ }
if (unlikely(node->p.reverse_hash > reverse_hash)) {
node = NULL;
break;
node = clear_flag(next);
for (;;) {
- if (unlikely(!node))
+ if (unlikely(is_end(node))) {
+ node = NULL;
break;
+ }
if (unlikely(node->p.reverse_hash > reverse_hash)) {
node = NULL;
break;
size = rcu_dereference(ht->t.size);
ret = _cds_lfht_add(ht, size, node, 1, 0);
- if (ret != node)
+ if (ret == node)
ht_count_add(ht, size);
return ret;
}
if (!is_dummy(node))
return -EPERM;
assert(!is_removed(node));
- } while (clear_flag(node));
+ } while (!is_end(node));
/*
* size accessed without rcu_dereference because hash table is
* being destroyed.
else
(nr_dummy)++;
node = clear_flag(next);
- } while (node);
+ } while (!is_end(node));
dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}
static unsigned long init_hash_size = DEFAULT_HASH_SIZE;
static unsigned long init_populate;
-static unsigned long rand_pool = DEFAULT_RAND_POOL;
static int opt_auto_resize;
static int add_only, add_unique;
+static unsigned long init_pool_offset, lookup_pool_offset, write_pool_offset;
+static unsigned long init_pool_size = DEFAULT_RAND_POOL,
+ lookup_pool_size = DEFAULT_RAND_POOL,
+ write_pool_size = DEFAULT_RAND_POOL;
+static int validate_lookup;
+
static inline void loop_sleep(unsigned long l)
{
while(l-- != 0)
for (;;) {
rcu_read_lock();
node = cds_lfht_lookup(test_ht,
- (void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
+ (void *)(((unsigned long) rand_r(&rand_lookup) % lookup_pool_size) + lookup_pool_offset),
sizeof(void *));
- if (node == NULL)
+ if (node == NULL) {
+ if (validate_lookup) {
+ printf("[ERROR] Lookup cannot find initial node.\n");
+ exit(-1);
+ }
lookup_fail++;
- else
+ } else {
lookup_ok++;
+ }
debug_yield_read();
if (unlikely(rduration))
loop_sleep(rduration);
node = malloc(sizeof(struct cds_lfht_node));
rcu_read_lock();
cds_lfht_node_init(node,
- (void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
+ (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
sizeof(void *));
if (add_unique)
ret_node = cds_lfht_add_unique(test_ht, node);
/* May delete */
rcu_read_lock();
node = cds_lfht_lookup(test_ht,
- (void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
+ (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
sizeof(void *));
if (node)
ret = cds_lfht_remove(test_ht, node);
if (!init_populate)
return 0;
- if (add_unique && init_populate * 10 > rand_pool) {
+ if (add_unique && init_populate * 10 > init_pool_size) {
printf("WARNING: required to populate %lu nodes (-k), but random "
"pool is quite small (%lu values) and we are in add_unique (-u) mode. Try with a "
-"larger random pool (-p option).\n", init_populate, rand_pool);
- return -1;
+"larger random pool (-p option). This may take a while...\n", init_populate, init_pool_size);
}
while (nr_add < init_populate) {
node = malloc(sizeof(struct cds_lfht_node));
cds_lfht_node_init(node,
- (void *)(unsigned long)(rand_r(&rand_lookup) % rand_pool),
+ (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset),
sizeof(void *));
if (add_unique)
ret_node = cds_lfht_add_unique(test_ht, node);
printf(" [-c duration] (reader C.S. duration (in loops))");
printf(" [-v] (verbose output)");
printf(" [-a cpu#] [-a cpu#]... (affinity)");
- printf(" [-p size] (random key value pool size)");
printf(" [-h size] (initial hash table size)");
printf(" [-u] Uniquify add.");
printf(" [-i] Add only (no removal).");
printf(" [-k nr_nodes] Number of nodes to insert initially.");
printf(" [-A] Automatically resize hash table.");
+ printf(" [-R offset] Lookup pool offset.");
+ printf(" [-S offset] Write pool offset.");
+ printf(" [-T offset] Init pool offset.");
+ printf(" [-M size] Lookup pool size.");
+ printf(" [-N size] Write pool size.");
+ printf(" [-O size] Init pool size.");
+ printf(" [-V] Validate lookups of init values (use with filled init pool, same lookup range, with different write range).");
printf("\n");
}
case 'v':
verbose_mode = 1;
break;
- case 'p':
- if (argc < i + 2) {
- show_usage(argc, argv);
- return -1;
- }
- rand_pool = atol(argv[++i]);
- break;
case 'h':
if (argc < i + 2) {
show_usage(argc, argv);
case 'A':
opt_auto_resize = 1;
break;
+ case 'R':
+ lookup_pool_offset = atol(argv[++i]);
+ break;
+ case 'S':
+ write_pool_offset = atol(argv[++i]);
+ break;
+ case 'T':
+ init_pool_offset = atol(argv[++i]);
+ break;
+ case 'M':
+ lookup_pool_size = atol(argv[++i]);
+ break;
+ case 'N':
+ write_pool_size = atol(argv[++i]);
+ break;
+ case 'O':
+ init_pool_size = atol(argv[++i]);
+ break;
+ case 'V':
+ validate_lookup = 1;
+ break;
+
}
}
duration, nr_readers, nr_writers);
printf_verbose("Writer delay : %lu loops.\n", wdelay);
printf_verbose("Reader duration : %lu loops.\n", rduration);
- printf_verbose("Random pool size : %lu.\n", rand_pool);
printf_verbose("Mode:%s%s.\n",
add_only ? " add only" : " add/remove",
add_unique ? " uniquify" : "");
printf_verbose("Initial hash table size: %lu buckets.\n", init_hash_size);
+ printf_verbose("Init pool size offset %lu size %lu.\n",
+ init_pool_offset, init_pool_size);
+ printf_verbose("Lookup pool size offset %lu size %lu.\n",
+ lookup_pool_offset, lookup_pool_size);
+ printf_verbose("Update pool size offset %lu size %lu.\n",
+ write_pool_offset, write_pool_size);
printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
"main", pthread_self(), (unsigned long)gettid());
tot_writes);
printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu "
"nr_writers %3u "
- "wdelay %6lu rand_pool %12llu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
+ "wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
"nr_add %12llu nr_add_fail %12llu nr_remove %12llu nr_leaked %12lld\n",
argv[0], duration, nr_readers, rduration,
- nr_writers, wdelay, rand_pool, tot_reads, tot_writes,
+ nr_writers, wdelay, tot_reads, tot_writes,
tot_reads + tot_writes, tot_add, tot_add_exist, tot_remove,
(long long) tot_add + init_populate - tot_remove - count);
free_all_cpu_call_rcu_data();
int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
- int warned = 0;
+ static int warned = 0;
call_rcu_lock(&call_rcu_mutex);
if (cpu < 0 || maxcpus <= cpu) {
void rcu_exit(void)
{
-	assert(cds_list_empty(&registry));
+	/*
+	 * Assertion disabled because call_rcu threads are now rcu
+	 * readers, and left running at exit.
+	 * assert(cds_list_empty(&registry));
+	 */
}
#include "urcu-call-rcu-impl.h"