From: Mathieu Desnoyers
Date: Wed, 30 Sep 2009 22:43:40 +0000 (-0400)
Subject: Update urcu-ht
X-Git-Tag: v0.7.0~43^2~238
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=44395fb70714eea662bb1b76a82f704ffd874222;p=userspace-rcu.git

Update urcu-ht

[NOTE(review): reconstructed from a whitespace-mangled copy -- line breaks,
indentation and hunk context lines are approximate, and the #include header
names and sign-off email were already stripped in the source. Fetch the
commit from the X-Git-Url above if a byte-exact, applicable patch is needed.
Also note the new comment claims "256M buckets / 512MB" while the macro
(256 << 10) / sizeof(void *) yields 32K buckets -- inconsistent as written;
confirm against upstream history.]

Signed-off-by: Mathieu Desnoyers
---

diff --git a/tests/test_ht.c b/tests/test_ht.c
index 2636145..cdc78d5 100644
--- a/tests/test_ht.c
+++ b/tests/test_ht.c
@@ -249,6 +249,7 @@ void *thr_writer(void *_count)
 	for (;;) {
 		if (rand_r(&rand_lookup) & 1) {
 			data = malloc(sizeof(struct test_data));
+			//rcu_copy_mutex_lock();
 			ret = ht_add(test_ht,
 				(void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL),
 				data);
@@ -258,14 +259,25 @@ void *thr_writer(void *_count)
 			} else {
 				nr_add++;
 			}
+			//rcu_copy_mutex_unlock();
 		} else {
 			/* May delete */
+			//rcu_copy_mutex_lock();
 			ret = ht_delete(test_ht,
 				(void *)(unsigned long)(rand_r(&rand_lookup) % RAND_POOL));
 			if (ret == -ENOENT)
 				nr_delnoent++;
 			else
 				nr_del++;
+			//rcu_copy_mutex_unlock();
+		}
+		//if (nr_writes % 100000 == 0) {
+		if (nr_writes % 1000 == 0) {
+			if (rand_r(&rand_lookup) & 1) {
+				ht_resize(test_ht, 1);
+			} else {
+				ht_resize(test_ht, -1);
+			}
 		}
 		nr_writes++;
 		if (unlikely(!test_duration_write()))
diff --git a/urcu-ht.c b/urcu-ht.c
index 3b9012d..b6a1301 100644
--- a/urcu-ht.c
+++ b/urcu-ht.c
@@ -19,6 +19,12 @@
 #include
 #include
 
+/*
+ * Maximum number of hash table buckets: 256M on 64-bit.
+ * Should take about 512MB max if we assume 1 node per 4 buckets.
+ */
+#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))
+
 /* node flags */
 #define NODE_STOLEN (1 << 0)
 
@@ -51,12 +57,12 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
 	ht = calloc(1, sizeof(struct rcu_ht));
 	ht->hash_fct = hash_fct;
 	ht->free_fct = free_fct;
-	ht->size = init_size;
+	ht->size = init_size;	/* shared */
 	ht->keylen = keylen;
 	ht->hashseed = hashseed;
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
-	ht->resize_ongoing = 0;
+	ht->resize_ongoing = 0;	/* shared */
 	ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
 	return ht;
 }
@@ -114,7 +120,7 @@ int ht_add(struct rcu_ht *ht, void *key, void *data)
 retry:
 	rcu_read_lock();
 
-	if (unlikely(ht->resize_ongoing)) {
+	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
 		rcu_read_unlock();
 		/*
 		 * Wait for resize to complete before continuing.
@@ -126,7 +132,8 @@ retry:
 		goto retry;
 	}
 
-	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+	hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+		% LOAD_SHARED(ht->size);
 
 	old_head = node = rcu_dereference(ht->tbl[hash]);
 	for (;;) {
@@ -174,7 +181,7 @@ void *ht_steal(struct rcu_ht *ht, void *key)
 retry:
 	rcu_read_lock();
 
-	if (unlikely(ht->resize_ongoing)) {
+	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
 		rcu_read_unlock();
 		/*
 		 * Wait for resize to complete before continuing.
@@ -186,7 +193,8 @@ retry:
 		goto retry;
 	}
 
-	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+	hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+		% LOAD_SHARED(ht->size);
 
 	prev = &ht->tbl[hash];
 	node = rcu_dereference(*prev);
@@ -343,9 +351,10 @@ static void ht_resize_grow(struct rcu_ht *ht)
 
 	old_size = ht->size;
 
-	if (old_size == 1)
+	if (old_size == MAX_HT_BUCKETS)
 		return;
 
+	old_tbl = ht->tbl;
 	new_size = old_size << 1;
 	new_tbl = calloc(new_size, sizeof(struct rcu_ht_node *));
 
@@ -356,23 +365,22 @@ static void ht_resize_grow(struct rcu_ht *ht)
 		 * if it's in the table.
 		 * Copy each node. (just the node, not ->data)
 		 */
-		node = ht->tbl[i];
+		node = old_tbl[i];
 		while (node) {
 			hash = ht->hash_fct(node->key, ht->keylen,
 					ht->hashseed) % new_size;
 			new_node = malloc(sizeof(struct rcu_ht_node));
 			new_node->key = node->key;
 			new_node->data = node->data;
-			new_node->next = new_tbl[i];	/* add to head */
-			new_tbl[i] = new_node;
+			new_node->next = new_tbl[hash];	/* add to head */
+			new_tbl[hash] = new_node;
 			node = node->next;
 		}
 	}
 
-	old_tbl = ht->tbl;
 	ht->tbl = new_tbl;
 	smp_wmb();	/* write links and table before changing size */
-	ht->size = new_size;
+	STORE_SHARED(ht->size, new_size);
 
 	/* Ensure all concurrent lookups use new size and table */
 	synchronize_rcu();
@@ -400,17 +408,17 @@ static void ht_resize_shrink(struct rcu_ht *ht)
 	new_size = ht->size >> 1;
 
 	for (i = 0; i < new_size; i++) {
-		/* Link end with first entry of 2*i */
+		/* Link end with first entry of i + new_size */
 		prev = &ht->tbl[i];
 		node = *prev;
 		while (node) {
 			prev = &node->next;
 			node = *prev;
 		}
-		*prev = ht->tbl[i << 1];
+		*prev = ht->tbl[i + new_size];
 	}
 	smp_wmb();	/* write links before changing size */
-	ht->size = new_size;
+	STORE_SHARED(ht->size, new_size);
 
 	/* Ensure all concurrent lookups use new size */
 	synchronize_rcu();
@@ -429,7 +437,7 @@ void ht_resize(struct rcu_ht *ht, int growth)
 
 	ret = pthread_mutex_lock(&ht->resize_mutex);
 	assert(!ret);
-	ht->resize_ongoing = 1;
+	STORE_SHARED(ht->resize_ongoing, 1);
 	synchronize_rcu();
 	/* All add/remove are waiting on the mutex. */
 	if (growth > 0)
@@ -437,7 +445,7 @@ void ht_resize(struct rcu_ht *ht, int growth)
 	else if (growth < 0)
 		ht_resize_shrink(ht);
 	smp_mb();
-	ht->resize_ongoing = 0;
+	STORE_SHARED(ht->resize_ongoing, 0);
 	ret = pthread_mutex_unlock(&ht->resize_mutex);
 	assert(!ret);
 }