From: Mathieu Desnoyers Date: Sun, 29 Apr 2012 20:19:09 +0000 (-0400) Subject: rculfhash tests: add uniqueness test X-Git-Tag: v0.7.0~24 X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=20adf7802bcec449566b9cbb900504fd409f62db;p=urcu.git rculfhash tests: add uniqueness test Signed-off-by: Mathieu Desnoyers --- diff --git a/tests/Makefile.am b/tests/Makefile.am index e9d2046..0efc034 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -176,7 +176,7 @@ test_urcu_wfs_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS) test_urcu_wfs_dynlink_LDADD = $(URCU_COMMON_LIB) test_urcu_hash_SOURCES = test_urcu_hash.c test_urcu_hash.h \ - test_urcu_hash_rw.c $(COMPAT) + test_urcu_hash_rw.c test_urcu_hash_unique.c $(COMPAT) test_urcu_hash_CFLAGS = -DRCU_QSBR $(AM_CFLAGS) test_urcu_hash_LDADD = $(URCU_QSBR_LIB) $(URCU_CDS_LIB) diff --git a/tests/test_urcu_hash.c b/tests/test_urcu_hash.c index be632ee..3c7cc35 100644 --- a/tests/test_urcu_hash.c +++ b/tests/test_urcu_hash.c @@ -25,6 +25,7 @@ enum test_hash { TEST_HASH_RW, + TEST_HASH_UNIQUE, }; struct test_hash_cb { @@ -44,6 +45,14 @@ struct test_hash_cb test_hash_cb[] = { test_hash_rw_thr_writer, test_hash_rw_populate_hash, }, + [TEST_HASH_UNIQUE] = { + test_hash_unique_sigusr1_handler, + test_hash_unique_sigusr2_handler, + test_hash_unique_thr_reader, + test_hash_unique_thr_writer, + test_hash_unique_populate_hash, + }, + }; static enum test_hash test_choice = TEST_HASH_RW; @@ -280,6 +289,7 @@ printf(" [not -u nor -s] Add entries (supports redundant keys).\n"); printf(" [-N size] Write pool size.\n"); printf(" [-O size] Init pool size.\n"); printf(" [-V] Validate lookups of init values (use with filled init pool, same lookup range, with different write range).\n"); + printf(" [-U] Uniqueness test.\n"); printf("\n\n"); } @@ -443,7 +453,9 @@ int main(int argc, char **argv) case 'V': validate_lookup = 1; break; - + case 'U': + test_choice = TEST_HASH_UNIQUE; + break; } } diff --git a/tests/test_urcu_hash.h b/tests/test_urcu_hash.h index 9abb2e0..a935e94 100644 --- a/tests/test_urcu_hash.h +++ b/tests/test_urcu_hash.h @@ -382,4 +382,11 @@ void *test_hash_rw_thr_reader(void *_count); void *test_hash_rw_thr_writer(void *_count); int test_hash_rw_populate_hash(void); +/* unique test */ +void test_hash_unique_sigusr1_handler(int signo); +void test_hash_unique_sigusr2_handler(int signo); +void *test_hash_unique_thr_reader(void *_count); +void *test_hash_unique_thr_writer(void *_count); +int test_hash_unique_populate_hash(void); + #endif /* _TEST_URCU_HASH_H */ diff --git a/tests/test_urcu_hash_unique.c b/tests/test_urcu_hash_unique.c new file mode 100644 index 0000000..c934feb --- /dev/null +++ b/tests/test_urcu_hash_unique.c @@ -0,0 +1,269 @@ +/* + * test_urcu_hash_unique.c + * + * Userspace RCU library - test program + * + * Copyright 2009-2012 - Mathieu Desnoyers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#define _GNU_SOURCE +#include "test_urcu_hash.h" + +enum urcu_hash_addremove { + AR_RANDOM = 0, + AR_ADD = 1, + AR_REMOVE = -1, +}; /* 1: add, -1 remove, 0: random */ + +static enum urcu_hash_addremove addremove; /* 1: add, -1 remove, 0: random */ + +void test_hash_unique_sigusr1_handler(int signo) +{ + switch (addremove) { + case AR_ADD: + printf("Add/Remove: random.\n"); + addremove = AR_RANDOM; + break; + case AR_RANDOM: + printf("Add/Remove: remove only.\n"); + addremove = AR_REMOVE; + break; + case AR_REMOVE: + printf("Add/Remove: add only.\n"); + addremove = AR_ADD; + break; + } +} + +void test_hash_unique_sigusr2_handler(int signo) +{ + char msg[1] = { 0x42 }; + ssize_t ret; + + do { + ret = write(count_pipe[1], msg, 1); /* wakeup thread */ + } while (ret == -1L && errno == EINTR); +} + +void *test_hash_unique_thr_reader(void *_count) +{ + unsigned long long *count = _count; + + printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n", + "reader", pthread_self(), (unsigned long)gettid()); + + set_affinity(); + + rcu_register_thread(); + + while (!test_go) + { + } + cmm_smp_mb(); + + for (;;) { + struct lfht_test_node *node; + struct cds_lfht_iter iter; + /* + * iterate on whole table, ensuring that no duplicate is + * found. + */ + rcu_read_lock(); + cds_lfht_for_each_entry(test_ht, &iter, node, node) { + struct cds_lfht_iter dup_iter; + + dup_iter = iter; + cds_lfht_next_duplicate(test_ht, test_match, + node->key, &dup_iter); + if (dup_iter.node != NULL) { + printf("[ERROR] Duplicate key %p found\n", node->key); + } + } + rcu_read_unlock(); + + debug_yield_read(); + if (caa_unlikely(rduration)) + loop_sleep(rduration); + nr_reads++; + if (caa_unlikely(!test_duration_read())) + break; + if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0)) + rcu_quiescent_state(); + } + + rcu_unregister_thread(); + + *count = nr_reads; + printf_verbose("thread_end %s, thread id : %lx, tid %lu\n", + "reader", pthread_self(), (unsigned long)gettid()); + printf_verbose("readid : %lx, lookupfail %lu, lookupok %lu\n", + pthread_self(), lookup_fail, lookup_ok); + return ((void*)1); + +} + +void *test_hash_unique_thr_writer(void *_count) +{ + struct lfht_test_node *node; + struct cds_lfht_node *ret_node; + struct cds_lfht_iter iter; + struct wr_count *count = _count; + int ret; + int loc_add_unique; + + printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n", + "writer", pthread_self(), (unsigned long)gettid()); + + set_affinity(); + + rcu_register_thread(); + + while (!test_go) + { + } + cmm_smp_mb(); + + for (;;) { + /* + * add unique/add replace with new node key from range. 
+ */ + if (1 || (addremove == AR_ADD || add_only) + || (addremove == AR_RANDOM && rand_r(&rand_lookup) & 1)) { + node = malloc(sizeof(struct lfht_test_node)); + lfht_test_node_init(node, + (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset), + sizeof(void *)); + rcu_read_lock(); + loc_add_unique = rand_r(&rand_lookup) & 1; + if (loc_add_unique) { + ret_node = cds_lfht_add_unique(test_ht, + test_hash(node->key, node->key_len, TEST_HASH_SEED), + test_match, node->key, &node->node); + } else { + ret_node = cds_lfht_add_replace(test_ht, + test_hash(node->key, node->key_len, TEST_HASH_SEED), + test_match, node->key, &node->node); +#if 0 //generate an error on purpose + cds_lfht_add(test_ht, + test_hash(node->key, node->key_len, TEST_HASH_SEED), + &node->node); + ret_node = NULL; +#endif //0 + } + rcu_read_unlock(); + if (loc_add_unique) { + if (ret_node != &node->node) { + free(node); + nr_addexist++; + } else { + nr_add++; + } + } else { + if (ret_node) { + call_rcu(&to_test_node(ret_node)->head, + free_node_cb); + nr_addexist++; + } else { + nr_add++; + } + } + } else { + /* May delete */ + rcu_read_lock(); + cds_lfht_test_lookup(test_ht, + (void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset), + sizeof(void *), &iter); + ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter)); + rcu_read_unlock(); + if (ret == 0) { + node = cds_lfht_iter_get_test_node(&iter); + call_rcu(&node->head, free_node_cb); + nr_del++; + } else + nr_delnoent++; + } +#if 0 + //if (nr_writes % 100000 == 0) { + if (nr_writes % 1000 == 0) { + rcu_read_lock(); + if (rand_r(&rand_lookup) & 1) { + ht_resize(test_ht, 1); + } else { + ht_resize(test_ht, -1); + } + rcu_read_unlock(); + } +#endif //0 + nr_writes++; + if (caa_unlikely(!test_duration_write())) + break; + if (caa_unlikely(wdelay)) + loop_sleep(wdelay); + if (caa_unlikely((nr_writes & ((1 << 10) - 1)) == 0)) + rcu_quiescent_state(); + } + + rcu_unregister_thread(); + + printf_verbose("thread_end %s, thread id : %lx, tid %lu\n", + "writer", pthread_self(), (unsigned long)gettid()); + printf_verbose("info id %lx: nr_add %lu, nr_addexist %lu, nr_del %lu, " + "nr_delnoent %lu\n", pthread_self(), nr_add, + nr_addexist, nr_del, nr_delnoent); + count->update_ops = nr_writes; + count->add = nr_add; + count->add_exist = nr_addexist; + count->remove = nr_del; + return ((void*)2); +} + +int test_hash_unique_populate_hash(void) +{ + struct lfht_test_node *node; + struct cds_lfht_node *ret_node; + + printf("Starting uniqueness test.\n"); + + if (!init_populate) + return 0; + + if (init_populate * 10 > init_pool_size) { + printf("WARNING: required to populate %lu nodes (-k), but random " +"pool is quite small (%lu values) and we are in add_unique (-u) or add_replace (-s) mode. Try with a " +"larger random pool (-p option). This may take a while...\n", init_populate, init_pool_size); + } + + while (nr_add < init_populate) { + node = malloc(sizeof(struct lfht_test_node)); + lfht_test_node_init(node, + (void *)(((unsigned long) rand_r(&rand_lookup) % init_pool_size) + init_pool_offset), + sizeof(void *)); + rcu_read_lock(); + ret_node = cds_lfht_add_replace(test_ht, + test_hash(node->key, node->key_len, TEST_HASH_SEED), + test_match, node->key, &node->node); + rcu_read_unlock(); + if (ret_node) { + call_rcu(&to_test_node(ret_node)->head, free_node_cb); + nr_addexist++; + } else { + nr_add++; + } + nr_writes++; + } + return 0; +}