--- /dev/null
+/*
+ * test_ht.c
+ *
+ * Userspace RCU library - test program
+ *
+ * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sched.h>
+#include "../urcu-ht.h"
+
+#include "../arch.h"
+
+/* Make this big enough to include the POWER5+ L3 cacheline size of 256B */
+#define CACHE_LINE_SIZE 4096
+
+/* hardcoded number of CPUs */
+#define NR_CPUS 16384
+
+#if defined(_syscall0)
+_syscall0(pid_t, gettid)
+#elif defined(__NR_gettid)
+static inline pid_t gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+#else
+#warning "use pid as tid"
+static inline pid_t gettid(void)
+{
+ return getpid();
+}
+#endif
+
+#ifndef DYNAMIC_LINK_TEST
+#define _LGPL_SOURCE
+#else
+#define debug_yield_read()
+#endif
+#include "../urcu.h"
+
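+/* Hash table under test: created and destroyed in main(). */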
+static struct rcu_ht *test_ht;
+
+struct test_array {
+ int a;
+};
+
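+/* test_go releases all threads at once; test_stop asks them to stop. */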
+static volatile int test_go, test_stop;
+
+static unsigned long wdelay;
+
+static struct test_array *test_rcu_pointer;
+
+static unsigned long duration;
+
+/* read-side C.S. duration, in loops */
+static unsigned long rduration;
+
+static inline void loop_sleep(unsigned long l)
+{
+	while (l-- != 0)
+ cpu_relax();
+}
+
+static int verbose_mode;
+
+#define printf_verbose(fmt, args...) \
+ do { \
+ if (verbose_mode) \
+			printf(fmt, ##args);	\
+ } while (0)
+
+static unsigned int cpu_affinities[NR_CPUS];
+static unsigned int next_aff = 0;
+static int use_affinity = 0;
+
+pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
+
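+/*
+ * Pin the calling thread to the next CPU from the -a affinity list,
+ * assigned round-robin under affinity_mutex.
+ */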
+static void set_affinity(void)
+{
+ cpu_set_t mask;
+ int cpu;
+ int ret;
+
+ if (!use_affinity)
+ return;
+
+ ret = pthread_mutex_lock(&affinity_mutex);
+ if (ret) {
+ perror("Error in pthread mutex lock");
+ exit(-1);
+ }
+ cpu = cpu_affinities[next_aff++];
+ ret = pthread_mutex_unlock(&affinity_mutex);
+ if (ret) {
+ perror("Error in pthread mutex unlock");
+ exit(-1);
+ }
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+ sched_setaffinity(0, sizeof(mask), &mask);
+}
+
+/*
+ * returns 0 if test should end.
+ */
+static int test_duration_write(void)
+{
+ return !test_stop;
+}
+
+static int test_duration_read(void)
+{
+ return !test_stop;
+}
+
+static unsigned long long __thread nr_writes;
+static unsigned long long __thread nr_reads;
+
+static unsigned int nr_readers;
+static unsigned int nr_writers;
+
+pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void rcu_copy_mutex_lock(void)
+{
+ int ret;
+ ret = pthread_mutex_lock(&rcu_copy_mutex);
+ if (ret) {
+ perror("Error in pthread mutex lock");
+ exit(-1);
+ }
+}
+
+void rcu_copy_mutex_unlock(void)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(&rcu_copy_mutex);
+ if (ret) {
+ perror("Error in pthread mutex unlock");
+ exit(-1);
+ }
+}
+
+/*
+ * malloc/free reuse memory areas too quickly, which does not let us
+ * test races appropriately. Use a large circular array for allocations.
+ * ARRAY_SIZE is larger than nr_writers, which ensures we never run over
+ * our tail.
+ */
+#define ARRAY_SIZE (1048576 * nr_writers)
+#define ARRAY_POISON 0xDEADBEEF
+static int array_index;
+static struct test_array *test_array;
+
+static struct test_array *test_array_alloc(void)
+{
+ struct test_array *ret;
+ int index;
+
+ rcu_copy_mutex_lock();
+ index = array_index % ARRAY_SIZE;
+ assert(test_array[index].a == ARRAY_POISON ||
+ test_array[index].a == 0);
+ ret = &test_array[index];
+ array_index++;
+ if (array_index == ARRAY_SIZE)
+ array_index = 0;
+ rcu_copy_mutex_unlock();
+ return ret;
+}
+
+static void test_array_free(struct test_array *ptr)
+{
+ if (!ptr)
+ return;
+ rcu_copy_mutex_lock();
+ ptr->a = ARRAY_POISON;
+ rcu_copy_mutex_unlock();
+}
+
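+/*
+ * Reader thread: dereference the shared pointer under the RCU read-side
+ * lock and check that the published value is consistent.
+ */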
+void *thr_reader(void *_count)
+{
+ unsigned long long *count = _count;
+ struct test_array *local_ptr;
+
+ printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+			"reader", (unsigned long)pthread_self(),
+			(unsigned long)gettid());
+
+ set_affinity();
+
+ rcu_register_thread();
+
+ while (!test_go)
+ {
+ }
+ smp_mb();
+
+ for (;;) {
+ rcu_read_lock();
+ local_ptr = rcu_dereference(test_rcu_pointer);
+ debug_yield_read();
+ if (local_ptr)
+ assert(local_ptr->a == 8);
+ if (unlikely(rduration))
+ loop_sleep(rduration);
+ rcu_read_unlock();
+ nr_reads++;
+ if (unlikely(!test_duration_read()))
+ break;
+ }
+
+ rcu_unregister_thread();
+
+ *count = nr_reads;
+ printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+			"reader", (unsigned long)pthread_self(),
+			(unsigned long)gettid());
+ return ((void*)1);
+
+}
+
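+/*
+ * Writer thread: allocate a new element, publish it with
+ * rcu_publish_content() (which waits for a grace period before handing
+ * back the old pointer), then poison and recycle the old element.
+ */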
+void *thr_writer(void *_count)
+{
+ unsigned long long *count = _count;
+ struct test_array *new, *old;
+
+ printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+			"writer", (unsigned long)pthread_self(),
+			(unsigned long)gettid());
+
+ set_affinity();
+
+ while (!test_go)
+ {
+ }
+ smp_mb();
+
+ for (;;) {
+ new = test_array_alloc();
+ new->a = 8;
+ old = rcu_publish_content(&test_rcu_pointer, new);
+ if (old)
+ old->a = 0;
+ test_array_free(old);
+ nr_writes++;
+ if (unlikely(!test_duration_write()))
+ break;
+ if (unlikely(wdelay))
+ loop_sleep(wdelay);
+ }
+
+ printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+			"writer", (unsigned long)pthread_self(),
+			(unsigned long)gettid());
+ *count = nr_writes;
+ return ((void*)2);
+}
+
+void show_usage(int argc, char **argv)
+{
+ printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
+#ifdef DEBUG_YIELD
+ printf(" [-r] [-w] (yield reader and/or writer)");
+#endif
+	printf(" [-d delay] (writer period (in loops))");
+ printf(" [-c duration] (reader C.S. duration (in loops))");
+ printf(" [-v] (verbose output)");
+ printf(" [-a cpu#] [-a cpu#]... (affinity)");
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ int err;
+ pthread_t *tid_reader, *tid_writer;
+ void *tret;
+ unsigned long long *count_reader, *count_writer;
+ unsigned long long tot_reads = 0, tot_writes = 0;
+ int i, a;
+
+ if (argc < 4) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ err = sscanf(argv[1], "%u", &nr_readers);
+ if (err != 1) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ err = sscanf(argv[2], "%u", &nr_writers);
+ if (err != 1) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ err = sscanf(argv[3], "%lu", &duration);
+ if (err != 1) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ for (i = 4; i < argc; i++) {
+ if (argv[i][0] != '-')
+ continue;
+ switch (argv[i][1]) {
+#ifdef DEBUG_YIELD
+ case 'r':
+ yield_active |= YIELD_READ;
+ break;
+ case 'w':
+ yield_active |= YIELD_WRITE;
+ break;
+#endif
+ case 'a':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ a = atoi(argv[++i]);
+ cpu_affinities[next_aff++] = a;
+ use_affinity = 1;
+ printf_verbose("Adding CPU %d affinity\n", a);
+ break;
+ case 'c':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ rduration = atol(argv[++i]);
+ break;
+ case 'd':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ wdelay = atol(argv[++i]);
+ break;
+ case 'v':
+ verbose_mode = 1;
+ break;
+ }
+ }
+
+ printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
+ duration, nr_readers, nr_writers);
+ printf_verbose("Writer delay : %lu loops.\n", wdelay);
+ printf_verbose("Reader duration : %lu loops.\n", rduration);
+ printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
+			"main", (unsigned long)pthread_self(),
+			(unsigned long)gettid());
+
+	/* Zeroed so the first-lap assertion in test_array_alloc() holds. */
+	test_array = calloc(ARRAY_SIZE, sizeof(*test_array));
+ tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
+ tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
+ count_reader = malloc(sizeof(*count_reader) * nr_readers);
+ count_writer = malloc(sizeof(*count_writer) * nr_writers);
+ test_ht = ht_new(stupid_hash, free);
+ next_aff = 0;
+
+ for (i = 0; i < nr_readers; i++) {
+ err = pthread_create(&tid_reader[i], NULL, thr_reader,
+ &count_reader[i]);
+ if (err != 0)
+ exit(1);
+ }
+ for (i = 0; i < nr_writers; i++) {
+ err = pthread_create(&tid_writer[i], NULL, thr_writer,
+ &count_writer[i]);
+ if (err != 0)
+ exit(1);
+ }
+
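+	/* Make thread setup visible before releasing the start flag. */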
+ smp_mb();
+
+ test_go = 1;
+
+ sleep(duration);
+
+ test_stop = 1;
+
+ for (i = 0; i < nr_readers; i++) {
+ err = pthread_join(tid_reader[i], &tret);
+ if (err != 0)
+ exit(1);
+ tot_reads += count_reader[i];
+ }
+ for (i = 0; i < nr_writers; i++) {
+ err = pthread_join(tid_writer[i], &tret);
+ if (err != 0)
+ exit(1);
+ tot_writes += count_writer[i];
+ }
+ ht_destroy(test_ht);
+
+ printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
+ tot_writes);
+ printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu "
+ "nr_writers %3u "
+ "wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu\n",
+ argv[0], duration, nr_readers, rduration,
+ nr_writers, wdelay, tot_reads, tot_writes,
+ tot_reads + tot_writes);
+ test_array_free(test_rcu_pointer);
+ free(test_array);
+ free(tid_reader);
+ free(tid_writer);
+ free(count_reader);
+ free(count_writer);
+ return 0;
+}
+++ /dev/null
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <urcu.h>
-#include <arch.h>
-#include <arch_atomic.h>
-#include <assert.h>
-#include <compiler.h>
-#include <urcu-defer.h>
-#include <errno.h>
-
-#define HASH_SIZE 4096
-
-typedef unsigned long (*ht_hash_fct)(void *key);
-
-struct rcu_ht_node;
-
-struct rcu_ht_node {
- struct rcu_ht_node *next;
- void *key;
- void *data;
-};
-
-struct rcu_ht {
- struct rcu_ht_node *tbl[HASH_SIZE];
- ht_hash_fct hash_fct;
- void (*free_fct)(void *data); /* fct to free data */
-};
-
-struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data));
-
-void ht_delete_all(struct rcu_ht *ht);
-
-int ht_destroy(struct rcu_ht *ht);
-
-void *ht_lookup(struct rcu_ht *ht, void *key);
-
-int ht_add(struct rcu_ht *ht, void *key, void *data);
-
-int ht_delete(struct rcu_ht *ht, void *key);
-
-void *ht_steal(struct rcu_ht *ht, void *key);
-
-
-/* Implementation */
-
-static unsigned long stupid_hash(void *key)
-{
- return (unsigned long)key % HASH_SIZE;
-}
-
-struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data))
-{
- struct rcu_ht *ht;
-
- ht = calloc(1, sizeof(struct rcu_ht));
- ht->hash_fct = hash_fct;
- ht->free_fct = free_fct;
-}
-
-/* delete all elements */
-void ht_delete_all(struct rcu_ht *ht)
-{
- unsigned long i;
- struct rcu_ht_node **prev, *node;
-
- for (i = 0; i < HASH_SIZE; i++) {
- rcu_read_lock();
-
- prev = &ht->tbl[i];
- node = rcu_dereference(*prev);
- /*
- * Cut the head, therefore whole bucket will be unused
- * after GP. (except for concurrent additions)
- */
- rcu_assign_pointer(prev, NULL);
- for (;;) {
- if (likely(!node)) {
- break;
- }
- prev = &node->next;
- if (node->data)
- call_rcu(ht->free_fct, node->data);
- call_rcu(free, node);
- node = rcu_dereference(*prev);
- }
- rcu_read_unlock();
- }
-}
-
-/*
- * Should only be called when no more concurrent readers nor writers can
- * possibly access the table.
- */
-int ht_destroy(struct rcu_ht *ht)
-{
- ht_delete_all(ht);
- free(ht);
-}
-
-void *ht_lookup(struct rcu_ht *ht, void *key)
-{
- unsigned long hash;
- struct rcu_ht_node *node;
- void *ret;
-
- hash = ht->hash_fct(key);
-
- rcu_read_lock();
- node = rcu_dereference(ht->tbl[hash]);
- for (;;) {
- if (likely(!node)) {
- ret = NULL;
- break;
- }
- if (node->key == key) {
- ret = node->data;
- break;
- }
- node = rcu_dereference(node->next);
- }
- rcu_read_unlock();
-}
-
-/*
- * Will re-try until either:
- * - The key is already there (-EEXIST)
- * - We successfully add the key at the head of a table bucket.
- */
-int ht_add(struct rcu_ht *ht, void *key, void *data)
-{
- struct rcu_ht_node *node, *old_head, *new_head;
- unsigned long hash;
- int ret = 0;
-
- new_head = calloc(1, sizeof(struct rcu_ht_node));
- new_head->key = key;
- new_head->data = data;
- hash = ht->hash_fct(key);
-
- /* here comes the fun and tricky part.
- * Add at the beginning with a cmpxchg.
- * Hold a read lock between the moment the first element is read
- * and the nodes traversal (to find duplicates). This ensures
- * the head pointer has not been reclaimed when cmpxchg is done.
- * Always adding at the head ensures that we would have to
- * re-try if a new item has been added concurrently. So we ensure that
- * we never add duplicates. */
-retry:
- rcu_read_lock();
-
- old_head = node = rcu_dereference(ht->tbl[hash]);
- for (;;) {
- if (likely(!node)) {
- break;
- }
- if (node->key == key) {
- ret = -EEXIST;
- goto end;
- }
- node = rcu_dereference(node->next);
- }
- if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
- goto restart;
-end:
- rcu_read_unlock();
-
- return ret;
-
- /* restart loop, release and re-take the read lock to be kind to GP */
-restart:
- rcu_read_unlock();
- goto retry;
-}
-
-/*
- * Restart until we successfully remove the entry, or no entry is left
- * ((void *)(unsigned long)-ENOENT).
- */
-struct rcu_ht_node *ht_steal(struct rcu_ht *ht, void *key);
-{
- struct rcu_ht_node **prev, *node;
- unsigned long hash;
-
- hash = ht->hash_fct(key);
-
-retry:
- rcu_read_lock();
-
- prev = &ht->tbl[hash];
- node = rcu_dereference(*prev);
- for (;;) {
- if (likely(!node)) {
- node = (void *)(unsigned long)-ENOENT;
- goto end;
- }
- if (node->key == key) {
- break;
- }
- prev = &node->next;
- node = rcu_dereference(*prev);
- }
- /* Found it ! pointer to object is in "prev" */
- if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
- goto restart;
-end:
- rcu_read_unlock();
-
- return node;
-
- /* restart loop, release and re-take the read lock to be kind to GP */
-restart:
- rcu_read_unlock();
- goto retry;
-}
-
-int ht_delete(struct rcu_ht *ht, void *key)
-{
- struct rcu_ht_node *node;
-
- node = ht_steal(ht, key);
- if (node) {
- if (free_fct && node->data)
- call_rcu(ht->free_fct, node->data);
- call_rcu(free, node);
- return 0;
- } else {
- return -ENOENT;
- }
-}
--- /dev/null
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <urcu.h>
+#include <arch.h>
+#include <arch_atomic.h>
+#include <assert.h>
+#include <compiler.h>
+#include <urcu-defer.h>
+#include <errno.h>
+#include <urcu-ht.h>
+
+struct rcu_ht_node;
+
+struct rcu_ht_node {
+ struct rcu_ht_node *next;
+ void *key;
+ void *data;
+};
+
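+/* Fixed-size array of RCU-protected, singly-linked bucket lists. */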
+struct rcu_ht {
+ struct rcu_ht_node *tbl[HASH_SIZE];
+ ht_hash_fct hash_fct;
+ void (*free_fct)(void *data); /* fct to free data */
+};
+
+struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data))
+{
+ struct rcu_ht *ht;
+
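+	/* calloc leaves every bucket head NULL, i.e. an empty chain. */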
+ ht = calloc(1, sizeof(struct rcu_ht));
+ ht->hash_fct = hash_fct;
+ ht->free_fct = free_fct;
+ return ht;
+}
+
+/* delete all elements */
+void ht_delete_all(struct rcu_ht *ht)
+{
+ unsigned long i;
+ struct rcu_ht_node **prev, *node;
+
+ for (i = 0; i < HASH_SIZE; i++) {
+ rcu_read_lock();
+
+ prev = &ht->tbl[i];
+ node = rcu_dereference(*prev);
+		/*
+		 * Cut the bucket head, so the whole chain becomes
+		 * unreachable and unused after a GP (except for
+		 * concurrent additions).
+		 */
+		rcu_assign_pointer(*prev, NULL);
+ for (;;) {
+ if (likely(!node)) {
+ break;
+ }
+ prev = &node->next;
+ if (node->data)
+ call_rcu(ht->free_fct, node->data);
+ call_rcu(free, node);
+ node = rcu_dereference(*prev);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/*
+ * Should only be called when no more concurrent readers nor writers can
+ * possibly access the table.
+ */
+void ht_destroy(struct rcu_ht *ht)
+{
+ ht_delete_all(ht);
+ free(ht);
+}
+
+void *ht_lookup(struct rcu_ht *ht, void *key)
+{
+ unsigned long hash;
+ struct rcu_ht_node *node;
+ void *ret;
+
+ hash = ht->hash_fct(key);
+
+ rcu_read_lock();
+ node = rcu_dereference(ht->tbl[hash]);
+ for (;;) {
+ if (likely(!node)) {
+ ret = NULL;
+ break;
+ }
+ if (node->key == key) {
+ ret = node->data;
+ break;
+ }
+ node = rcu_dereference(node->next);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/*
+ * Will re-try until either:
+ * - The key is already there (-EEXIST)
+ * - We successfully add the key at the head of a table bucket.
+ */
+int ht_add(struct rcu_ht *ht, void *key, void *data)
+{
+ struct rcu_ht_node *node, *old_head, *new_head;
+ unsigned long hash;
+ int ret = 0;
+
+ new_head = calloc(1, sizeof(struct rcu_ht_node));
+ new_head->key = key;
+ new_head->data = data;
+ hash = ht->hash_fct(key);
+
+	/*
+	 * Here comes the fun and tricky part: add at the bucket head with
+	 * a cmpxchg. Hold the read lock between the moment the head is
+	 * read and the node traversal (looking for duplicates); this
+	 * ensures the head pointer has not been reclaimed by the time the
+	 * cmpxchg runs. Always adding at the head forces a retry whenever
+	 * another item was added concurrently, so we never insert
+	 * duplicates.
+	 */
+retry:
+ rcu_read_lock();
+
+ old_head = node = rcu_dereference(ht->tbl[hash]);
+ for (;;) {
+ if (likely(!node)) {
+ break;
+ }
+		if (node->key == key) {
+			ret = -EEXIST;
+			/* new_head was never published: plain free is safe */
+			free(new_head);
+			goto end;
+		}
+ node = rcu_dereference(node->next);
+ }
+ if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
+ goto restart;
+end:
+ rcu_read_unlock();
+
+ return ret;
+
+ /* restart loop, release and re-take the read lock to be kind to GP */
+restart:
+ rcu_read_unlock();
+ goto retry;
+}
+
+/*
+ * Retry until we successfully remove the entry, or until no entry is
+ * left, in which case (void *)(unsigned long)-ENOENT is returned.
+ */
+void *ht_steal(struct rcu_ht *ht, void *key)
+{
+ struct rcu_ht_node **prev, *node;
+ unsigned long hash;
+ void *data;
+
+ hash = ht->hash_fct(key);
+
+retry:
+ rcu_read_lock();
+
+ prev = &ht->tbl[hash];
+ node = rcu_dereference(*prev);
+ for (;;) {
+ if (likely(!node)) {
+ data = (void *)(unsigned long)-ENOENT;
+ goto end;
+ }
+ if (node->key == key) {
+ break;
+ }
+ prev = &node->next;
+ node = rcu_dereference(*prev);
+ }
+	/* Found it! The pointer to the node is in "prev". */
+	if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
+		goto restart;
+	/* The node is now ours: grab its data, then defer freeing it. */
+	data = node->data;
+	call_rcu(free, node);
+end:
+	rcu_read_unlock();
+
+	return data;
+
+ /* restart loop, release and re-take the read lock to be kind to GP */
+restart:
+ rcu_read_unlock();
+ goto retry;
+}
+
+int ht_delete(struct rcu_ht *ht, void *key)
+{
+ void *data;
+
+	data = ht_steal(ht, key);
+	if (data == (void *)(unsigned long)-ENOENT)
+		return -ENOENT;
+	if (ht->free_fct && data)
+		call_rcu(ht->free_fct, data);
+	return 0;
+}
+
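+/* Trivial modulo hash, meant for the test program only. */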
+unsigned long stupid_hash(void *key)
+{
+ return (unsigned long)key % HASH_SIZE;
+}
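+
+/*
+ * Example usage: a minimal sketch, not part of the library. The integer
+ * key and the strdup()'d string are illustrative assumptions; any
+ * pointer-sized key and heap-allocated data work the same way.
+ *
+ *	struct rcu_ht *ht = ht_new(stupid_hash, free);
+ *
+ *	rcu_register_thread();
+ *	(void) ht_add(ht, (void *)42UL, strdup("forty-two"));
+ *	printf("%s\n", (char *)ht_lookup(ht, (void *)42UL));
+ *	(void) ht_delete(ht, (void *)42UL);
+ *	rcu_unregister_thread();
+ *	ht_destroy(ht);
+ *
+ * ht_delete() hands the stored data to call_rcu(free_fct, data), so the
+ * caller must not free it again.
+ */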