X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=tests%2Ftest_urcu_hash.c;h=c8d8a89b411c4679b1e042f329571e2497f96aeb;hb=fb6d173d7da9ab07d5bf8acf92a394ace66fce00;hp=8c6f0dac218a44f30b6bd16211fd0156e8fc3943;hpb=caf3653dbe9dec81dc37c8e81267f27d3163dd19;p=userspace-rcu.git

diff --git a/tests/test_urcu_hash.c b/tests/test_urcu_hash.c
index 8c6f0da..c8d8a89 100644
--- a/tests/test_urcu_hash.c
+++ b/tests/test_urcu_hash.c
@@ -21,6 +21,7 @@
  */

 #define _GNU_SOURCE
+#include "../config.h"
 #include
 #include
 #include
@@ -32,6 +33,7 @@
 #include
 #include
 #include
+#include

 #ifdef __linux__
 #include
@@ -41,6 +43,12 @@
 #define DEFAULT_MIN_ALLOC_SIZE	1
 #define DEFAULT_RAND_POOL	1000000

+/*
+ * Note: the hash seed should be a random value for hash tables
+ * targeting production environments to provide protection against
+ * denial of service attacks. We keep it a static value within this test
+ * program to compare identical benchmark runs.
+ */
 #define TEST_HASH_SEED	0x42UL

 /* Make this big enough to include the POWER5+ L3 cacheline size of 256B */
@@ -181,6 +189,12 @@ static int use_affinity = 0;

 pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;

+#ifndef HAVE_CPU_SET_T
+typedef unsigned long cpu_set_t;
+# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
+# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
+#endif
+
 static void set_affinity(void)
 {
 	cpu_set_t mask;
@@ -190,6 +204,7 @@ static void set_affinity(void)
 	if (!use_affinity)
 		return;

+#if HAVE_SCHED_SETAFFINITY
 	ret = pthread_mutex_lock(&affinity_mutex);
 	if (ret) {
 		perror("Error in pthread mutex lock");
@@ -203,7 +218,12 @@ static void set_affinity(void)
 	}
 	CPU_ZERO(&mask);
 	CPU_SET(cpu, &mask);
-	sched_setaffinity(0, sizeof(mask), &mask);
+#if SCHED_SETAFFINITY_ARGS == 2
+	sched_setaffinity(0, &mask);
+#else
+	sched_setaffinity(0, sizeof(mask), &mask);
+#endif
+#endif /* HAVE_SCHED_SETAFFINITY */
 }

 static enum {
@@ -610,7 +630,7 @@ void *thr_writer(void *_count)
 			cds_lfht_test_lookup(test_ht,
 				(void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
 				sizeof(void *), &iter);
-			ret = cds_lfht_del(test_ht, &iter);
+			ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
 			rcu_read_unlock();
 			if (ret == 0) {
 				node = cds_lfht_iter_get_test_node(&iter);
@@ -715,7 +735,7 @@ void test_delete_all_nodes(struct cds_lfht *ht)
 	cds_lfht_for_each_entry(ht, &iter, node, node) {
 		int ret;

-		ret = cds_lfht_del(test_ht, &iter);
+		ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
 		assert(!ret);
 		call_rcu(&node->head, free_node_cb);
 		count++;
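
The functional change in this diff is the cds_lfht_del() calling convention: the node obtained from the iterator is passed to the delete call rather than the iterator itself. Below is a minimal sketch of the lookup-then-delete pattern with that signature. It is illustrative only: the struct test_node type and lookup_and_del() helper are hypothetical, and the cds_lfht_lookup() signature shown is the one from current liburcu, which differs from the key/key_len wrapper used by this test program at the commit above.

#include <errno.h>
#include <urcu.h>
#include <urcu/rculfhash.h>

/* Hypothetical node type embedding the hash table node, as test_urcu_hash.c does. */
struct test_node {
	void *key;
	struct cds_lfht_node node;
};

/* Look up a key and delete the matching node, under an RCU read-side lock. */
static int lookup_and_del(struct cds_lfht *ht, unsigned long hash,
		cds_lfht_match_fct match, void *key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *found;
	int ret = -ENOENT;

	rcu_read_lock();
	cds_lfht_lookup(ht, hash, match, key, &iter);
	found = cds_lfht_iter_get_node(&iter);
	if (found)
		ret = cds_lfht_del(ht, found);	/* pass the node, not the iterator */
	rcu_read_unlock();
	return ret;
}

The actual freeing of the removed node still has to be deferred (e.g. via call_rcu(), as the test does with free_node_cb) so that concurrent RCU readers holding a reference to it remain safe.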