--- /dev/null
+/*
+ * test_urcu_rbtree.c
+ *
+ * Userspace RCU library - test program for RB tree
+ *
+ * Copyright February 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define _GNU_SOURCE
+#include "../config.h"
+#include <stdio.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sched.h>
+#include <errno.h>
+
+#include <urcu/arch.h>
+
+/* hardcoded number of CPUs */
+#define NR_CPUS 16384
+
+#if defined(_syscall0)
+_syscall0(pid_t, gettid)
+#elif defined(__NR_gettid)
+static inline pid_t gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+#else
+#warning "use pid as tid"
+static inline pid_t gettid(void)
+{
+ return getpid();
+}
+#endif
+
+#ifndef DYNAMIC_LINK_TEST
+#define _LGPL_SOURCE
+#else
+#define debug_yield_read()
+#endif
+#include <urcu.h>
+#include <urcu-rbtree.h>
+
+static struct rcu_rbtree_node *rbtree_root;
+
+/* TODO: test error handling for -ENOMEM */
+struct rcu_rbtree_node *rbtree_alloc(void)
+{
+ return malloc(sizeof(struct rcu_rbtree_node));
+}
+
+void rbtree_free(struct rcu_rbtree_node *node)
+{
+	free(node);
+}
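+
+/*
+ * Illustrative sketch only: a key comparison callback matching the
+ * rcu_rbtree_comp typedef, assuming keys are compared by their pointer
+ * value. The name rbtree_comp is arbitrary; the benchmark loops below do
+ * not exercise the tree yet, so this callback is not wired in anywhere.
+ */
+int rbtree_comp(void *a, void *b)
+{
+	unsigned long ka = (unsigned long)a, kb = (unsigned long)b;
+
+	if (ka < kb)
+		return -1;
+	if (ka > kb)
+		return 1;
+	return 0;
+}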
+
+static volatile int test_go, test_stop;
+
+static unsigned long wdelay;
+
+static unsigned long duration;
+
+/* read-side C.S. duration, in loops */
+static unsigned long rduration;
+
+/* write-side C.S. duration, in loops */
+static unsigned long wduration;
+
+static inline void loop_sleep(unsigned long l)
+{
+ while(l-- != 0)
+ cpu_relax();
+}
+
+static int verbose_mode;
+
+#define printf_verbose(fmt, args...) \
+ do { \
+ if (verbose_mode) \
+ printf(fmt, args); \
+ } while (0)
+
+static unsigned int cpu_affinities[NR_CPUS];
+static unsigned int next_aff = 0;
+static int use_affinity = 0;
+
+pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#ifndef HAVE_CPU_SET_T
+typedef unsigned long cpu_set_t;
+# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
+# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
+#endif
+
+static void set_affinity(void)
+{
+ cpu_set_t mask;
+ int cpu;
+ int ret;
+
+ if (!use_affinity)
+ return;
+
+#if HAVE_SCHED_SETAFFINITY
+ ret = pthread_mutex_lock(&affinity_mutex);
+ if (ret) {
+ perror("Error in pthread mutex lock");
+ exit(-1);
+ }
+ cpu = cpu_affinities[next_aff++];
+ ret = pthread_mutex_unlock(&affinity_mutex);
+ if (ret) {
+ perror("Error in pthread mutex unlock");
+ exit(-1);
+ }
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+#if SCHED_SETAFFINITY_ARGS == 2
+ sched_setaffinity(0, &mask);
+#else
+ sched_setaffinity(0, sizeof(mask), &mask);
+#endif
+#endif /* HAVE_SCHED_SETAFFINITY */
+}
+
+/*
+ * returns 0 if test should end.
+ */
+static int test_duration_write(void)
+{
+ return !test_stop;
+}
+
+static int test_duration_read(void)
+{
+ return !test_stop;
+}
+
+static unsigned long long __thread nr_writes;
+static unsigned long long __thread nr_reads;
+
+static unsigned int nr_readers;
+static unsigned int nr_writers;
+
+pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void rcu_copy_mutex_lock(void)
+{
+ int ret;
+ ret = pthread_mutex_lock(&rcu_copy_mutex);
+ if (ret) {
+ perror("Error in pthread mutex lock");
+ exit(-1);
+ }
+}
+
+void rcu_copy_mutex_unlock(void)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(&rcu_copy_mutex);
+ if (ret) {
+ perror("Error in pthread mutex unlock");
+ exit(-1);
+ }
+}
+
+void *thr_reader(void *_count)
+{
+ unsigned long long *count = _count;
+
+ printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+ "reader", pthread_self(), (unsigned long)gettid());
+
+ set_affinity();
+
+ rcu_register_thread();
+
+ while (!test_go)
+ {
+ }
+ smp_mb();
+
+ for (;;) {
+ rcu_read_lock();
+
+ debug_yield_read();
+ if (unlikely(rduration))
+ loop_sleep(rduration);
+ rcu_read_unlock();
+ nr_reads++;
+ if (unlikely(!test_duration_read()))
+ break;
+ }
+
+ rcu_unregister_thread();
+
+ /* test extra thread registration */
+ rcu_register_thread();
+ rcu_unregister_thread();
+
+ *count = nr_reads;
+ printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+ "reader", pthread_self(), (unsigned long)gettid());
+ return ((void*)1);
+
+}
+
+void *thr_writer(void *_count)
+{
+ unsigned long long *count = _count;
+
+ printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+ "writer", pthread_self(), (unsigned long)gettid());
+
+ set_affinity();
+
+ while (!test_go)
+ {
+ }
+ smp_mb();
+
+ for (;;) {
+
+ if (unlikely(wduration))
+ loop_sleep(wduration);
+
+ nr_writes++;
+ if (unlikely(!test_duration_write()))
+ break;
+ if (unlikely(wdelay))
+ loop_sleep(wdelay);
+ }
+
+ printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+ "writer", pthread_self(), (unsigned long)gettid());
+ *count = nr_writes;
+ return ((void*)2);
+}
+
+void show_usage(int argc, char **argv)
+{
+ printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
+#ifdef DEBUG_YIELD
+ printf(" [-r] [-w] (yield reader and/or writer)");
+#endif
+ printf(" [-d delay] (writer period (us))");
+ printf(" [-c duration] (reader C.S. duration (in loops))");
+ printf(" [-e duration] (writer C.S. duration (in loops))");
+ printf(" [-v] (verbose output)");
+ printf(" [-a cpu#] [-a cpu#]... (affinity)");
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ int err;
+ pthread_t *tid_reader, *tid_writer;
+ void *tret;
+ unsigned long long *count_reader, *count_writer;
+ unsigned long long tot_reads = 0, tot_writes = 0;
+ int i, a;
+
+ if (argc < 4) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ err = sscanf(argv[1], "%u", &nr_readers);
+ if (err != 1) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ err = sscanf(argv[2], "%u", &nr_writers);
+ if (err != 1) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ err = sscanf(argv[3], "%lu", &duration);
+ if (err != 1) {
+ show_usage(argc, argv);
+ return -1;
+ }
+
+ for (i = 4; i < argc; i++) {
+ if (argv[i][0] != '-')
+ continue;
+ switch (argv[i][1]) {
+#ifdef DEBUG_YIELD
+ case 'r':
+ yield_active |= YIELD_READ;
+ break;
+ case 'w':
+ yield_active |= YIELD_WRITE;
+ break;
+#endif
+ case 'a':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ a = atoi(argv[++i]);
+ cpu_affinities[next_aff++] = a;
+ use_affinity = 1;
+ printf_verbose("Adding CPU %d affinity\n", a);
+ break;
+ case 'c':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ rduration = atol(argv[++i]);
+ break;
+ case 'd':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ wdelay = atol(argv[++i]);
+ break;
+ case 'e':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ wduration = atol(argv[++i]);
+ break;
+ case 'v':
+ verbose_mode = 1;
+ break;
+ }
+ }
+
+ printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
+ duration, nr_readers, nr_writers);
+ printf_verbose("Writer delay : %lu loops.\n", wdelay);
+ printf_verbose("Reader duration : %lu loops.\n", rduration);
+ printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
+ "main", pthread_self(), (unsigned long)gettid());
+
+ tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
+ tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
+ count_reader = malloc(sizeof(*count_reader) * nr_readers);
+ count_writer = malloc(sizeof(*count_writer) * nr_writers);
+
+ next_aff = 0;
+
+ for (i = 0; i < nr_readers; i++) {
+ err = pthread_create(&tid_reader[i], NULL, thr_reader,
+ &count_reader[i]);
+ if (err != 0)
+ exit(1);
+ }
+ for (i = 0; i < nr_writers; i++) {
+ err = pthread_create(&tid_writer[i], NULL, thr_writer,
+ &count_writer[i]);
+ if (err != 0)
+ exit(1);
+ }
+
+ smp_mb();
+
+ test_go = 1;
+
+ sleep(duration);
+
+ test_stop = 1;
+
+ for (i = 0; i < nr_readers; i++) {
+ err = pthread_join(tid_reader[i], &tret);
+ if (err != 0)
+ exit(1);
+ tot_reads += count_reader[i];
+ }
+ for (i = 0; i < nr_writers; i++) {
+ err = pthread_join(tid_writer[i], &tret);
+ if (err != 0)
+ exit(1);
+ tot_writes += count_writer[i];
+ }
+
+ printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
+ tot_writes);
+ printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
+ "nr_writers %3u "
+ "wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu\n",
+ argv[0], duration, nr_readers, rduration, wduration,
+ nr_writers, wdelay, tot_reads, tot_writes,
+ tot_reads + tot_writes);
+ free(tid_reader);
+ free(tid_writer);
+ free(count_reader);
+ free(count_writer);
+ return 0;
+}
--- /dev/null
+/*
+ * urcu-rbtree.c
+ *
+ * Userspace RCU library - Red-Black Tree
+ *
+ * Copyright (c) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Implementation of RCU-adapted data structures and operations based on the RB
+ * tree algorithms found in chapter 12 of:
+ *
+ * Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, and
+ * Clifford Stein. Introduction to Algorithms, Third Edition. The MIT
+ * Press, September 2009.
+ */
+
+#define _BSD_SOURCE
+#define _LGPL_SOURCE
+
+#include <stdio.h>
+#include <pthread.h>
+
+#include <urcu-rbtree.h>
+#include <urcu-pointer.h>
+
+/*
+ * TODO
+ * Deal with memory allocation errors.
+ * This can be ensured by reserving a pool of memory entries before the
+ * insertion; the pool size will have to be a function of the number of
+ * transplantations/rotations required for the operation.
+ */
+
+/* Sentinel (bottom nodes). Don't care about p, left, right and key values */
+static struct rcu_rbtree_node nil = {
+ .color = COLOR_BLACK,
+};
+
+/*
+ * Iterative rbtree search.
+ * Keys are compared with the caller-provided comp callback. Internal leaves
+ * are the &nil sentinel; NULL is returned when the key is not found.
+ */
+struct rcu_rbtree_node* rcu_rbtree_search(struct rcu_rbtree_node *x,
+					  void *k, rcu_rbtree_comp comp)
+{
+	x = rcu_dereference(x);
+
+	while (x != &nil) {
+		int result = comp(k, x->key);
+
+		if (!result)
+			return x;
+		if (result < 0)
+			x = rcu_dereference(x->left);
+		else
+			x = rcu_dereference(x->right);
+	}
+	return NULL;
+}
+
+struct rcu_rbtree_node *rcu_rbtree_min(struct rcu_rbtree_node *x,
+				       rcu_rbtree_comp comp)
+{
+	struct rcu_rbtree_node *xl;
+
+	x = rcu_dereference(x);
+
+	if (x == &nil)
+		return NULL;
+	while ((xl = rcu_dereference(x->left)) != &nil)
+		x = xl;
+	return x;
+}
+
+struct rcu_rbtree_node *rcu_rbtree_max(struct rcu_rbtree_node *x,
+				       rcu_rbtree_comp comp)
+{
+	struct rcu_rbtree_node *xr;
+
+	x = rcu_dereference(x);
+
+	if (x == &nil)
+		return NULL;
+	while ((xr = rcu_dereference(x->right)) != &nil)
+		x = xr;
+	return x;
+}
+
+/*
+ * next and prev need to have mutex held to ensure that parent pointer is
+ * coherent.
+ */
+struct rcu_rbtree_node *rcu_rbtree_next(struct rcu_rbtree_node *x,
+					rcu_rbtree_comp comp)
+{
+	struct rcu_rbtree_node *xr, *y;
+
+	x = rcu_dereference(x);
+
+	if ((xr = rcu_dereference(x->right)) != &nil)
+		return rcu_rbtree_min(xr, comp);
+	y = rcu_dereference(x->p);
+	while (y != &nil && x == rcu_dereference(y->right)) {
+		x = y;
+		y = rcu_dereference(y->p);
+	}
+	if (y == &nil)
+		return NULL;
+	return y;
+}
+
+struct rcu_rbtree_node *rcu_rbtree_prev(struct rcu_rbtree_node *x,
+					rcu_rbtree_comp comp)
+{
+	struct rcu_rbtree_node *xl, *y;
+
+	x = rcu_dereference(x);
+
+	if ((xl = rcu_dereference(x->left)) != &nil)
+		return rcu_rbtree_max(xl, comp);
+	y = rcu_dereference(x->p);
+	while (y != &nil && x == rcu_dereference(y->left)) {
+		x = y;
+		y = rcu_dereference(y->p);
+	}
+	if (y == &nil)
+		return NULL;
+	return y;
+}
+
+/*
+ * The following assumptions must hold for prev/next traversal:
+ *
+ * if x is a right child:
+ *	x->p->right == x
+ * if x is a left child:
+ *	x->p->left == x
+ *
+ * This is why left_rotate, right_rotate and transplant operate on freshly
+ * allocated copies of the nodes they modify.
+ *
+ * We always set the right/left child and the correct parent in the node
+ * copies *before* we reparent the children and make the upper level point
+ * to the copy.
+ */
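+
+/*
+ * Illustrative sketch (matching the code below): left_rotate(root, x) with
+ * y = x->right allocates copies xc and yc and relinks them as follows:
+ *
+ *        p                     p
+ *        |                     |
+ *        x                     yc
+ *       / \                   /  \
+ *      a   y       ===>      xc   c
+ *         / \                / \
+ *        b   c              a   b
+ *
+ * The old x and y are handed to defer_rcu() and reclaimed once no reader
+ * can still hold a reference to them. right_rotate() is the mirror image.
+ */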
+
+/*
+ * RCU: copy x and y, atomically point to the new versions, reclaim the old
+ * ones through defer_rcu(). Should eventually be followed by a smp_wmc().
+ * Returns the new x. Previous x->right references are changed to yc.
+ */
+static struct rcu_rbtree_node *left_rotate(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *x,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *xc, *y, *yc;
+
+ y = x->right;
+
+ yc = rballoc();
+ xc = rballoc();
+ *xc = *x;
+ *yc = *y;
+
+ /* Modify children and parents in the node copies */
+ xc->right = y->left;
+ xc->p = yc;
+ yc->left = xc;
+ yc->p = x->p;
+
+ /*
+ * Order stores to node copies (children/parents) before stores that
+ * will make the copies visible to the rest of the tree.
+ */
+ smp_wmb();
+
+ /* Make parents point to the copies */
+ if (x->p == &nil)
+ _STORE_SHARED(*root, yc);
+ else if (x == x->p->left)
+ _STORE_SHARED(x->p->left, yc);
+ else
+ _STORE_SHARED(x->p->right, yc);
+
+ /* Assign children parents to copies */
+ _STORE_SHARED(xc->right->p, xc);
+ _STORE_SHARED(xc->left->p, xc);
+ _STORE_SHARED(yc->right->p, yc);
+ /* yc->left is xc, its parent is already set in node copy */
+
+ defer_rcu(rbfree, x);
+ defer_rcu(rbfree, y);
+ return xc;
+}
+
+#if 0 /* orig */
+static void left_rotate(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *x,
+ rcu_rbtree_alloc rballoc)
+{
+ struct rcu_rbtree_node *y;
+
+ y = x->right;
+ x->right = y->left;
+ if (y->left != &nil)
+ y->left->p = x;
+ y->p = x->p;
+ if (x->p == &nil)
+ *root = y;
+ else if (x == x->p->left)
+ x->p->left = y;
+ else
+ x->p->right = y;
+ y->left = x;
+ x->p = y;
+}
+#endif //0
+
+/*
+ * RCU: copy x and y, atomically point to the new versions, reclaim the old
+ * ones through defer_rcu(). Should eventually be followed by a smp_wmc().
+ * Returns the new x. Previous x->left references are changed to yc.
+ */
+static struct rcu_rbtree_node *right_rotate(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *x,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *xc, *y, *yc;
+
+ y = x->left;
+
+ yc = rballoc();
+ xc = rballoc();
+ *xc = *x;
+ *yc = *y;
+
+ /* Modify children and parents in the node copies */
+ xc->left = y->right;
+ xc->p = yc;
+ yc->right = xc;
+ yc->p = x->p;
+
+ /*
+ * Order stores to node copies (children/parents) before stores that
+ * will make the copies visible to the rest of the tree.
+ */
+ smp_wmb();
+
+ /* Make parents point to the copies */
+ if (x->p == &nil)
+ _STORE_SHARED(*root, yc);
+ else if (x == x->p->right)
+ _STORE_SHARED(x->p->right, yc);
+ else
+ _STORE_SHARED(x->p->left, yc);
+
+ /* Assign children parents to copies */
+ _STORE_SHARED(xc->left->p, xc);
+ _STORE_SHARED(xc->right->p, xc);
+ _STORE_SHARED(yc->left->p, yc);
+ /* yc->right is xc, its parent is already set in node copy */
+
+ defer_rcu(rbfree, x);
+ defer_rcu(rbfree, y);
+ return xc;
+}
+
+#if 0 //orig
+static void right_rotate(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *x,
+ rcu_rbtree_alloc rballoc)
+{
+ struct rcu_rbtree_node *y;
+
+ y = x->left;
+ x->left = y->right;
+ if (y->right != &nil)
+ y->right->p = x;
+ y->p = x->p;
+ if (x->p == &nil)
+ *root = y;
+ else if (x == x->p->right)
+ x->p->right = y;
+ else
+ x->p->left = y;
+ y->right = x;
+ x->p = y;
+}
+#endif //0
+
+static void rcu_rbtree_insert_fixup(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *z,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *y;
+
+ while (z->p->color == COLOR_RED) {
+ if (z->p == z->p->p->left) {
+ y = z->p->p->right;
+ if (y->color == COLOR_RED) {
+ z->p->color = COLOR_BLACK;
+ y->color = COLOR_BLACK;
+ z->p->p->color = COLOR_RED;
+ z = z->p->p;
+ } else {
+ if (z == z->p->right) {
+ z = z->p;
+ z = left_rotate(root, z,
+ rballoc, rbfree);
+ }
+ z->p->color = COLOR_BLACK;
+ z->p->p->color = COLOR_RED;
+ z = right_rotate(root, z->p->p,
+ rballoc, rbfree);
+ }
+ } else {
+ y = z->p->p->left;
+ if (y->color == COLOR_RED) {
+ z->p->color = COLOR_BLACK;
+ y->color = COLOR_BLACK;
+ z->p->p->color = COLOR_RED;
+ z = z->p->p;
+ } else {
+ if (z == z->p->left) {
+ z = z->p;
+ z = right_rotate(root, z,
+ rballoc, rbfree);
+ }
+ z->p->color = COLOR_BLACK;
+ z->p->p->color = COLOR_RED;
+ z = left_rotate(root, z->p->p,
+ rballoc, rbfree);
+ }
+ }
+ }
+ (*root)->color = COLOR_BLACK;
+}
+
+/*
+ * rcu_rbtree_insert - Insert a node in the RCU rbtree
+ *
+ * Returns 0 on success, or < 0 on error.
+ */
+int rcu_rbtree_insert(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *z,
+ rcu_rbtree_comp comp,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *x, *y;
+
+ y = &nil;
+ x = *root;
+
+ while (x != &nil) {
+ y = x;
+ if (comp(z->key, x->key) < 0)
+ x = x->left;
+ else
+ x = x->right;
+ }
+
+ z->p = y;
+ z->left = &nil;
+ z->right = &nil;
+ z->color = COLOR_RED;
+
+ /*
+ * Order stores to z (children/parents) before stores that will make it
+ * visible to the rest of the tree.
+ */
+ smp_wmb();
+
+ if (y == &nil)
+ _STORE_SHARED(*root, z);
+ else if (comp(z->key, y->key) < 0)
+ _STORE_SHARED(y->left, z);
+ else
+ _STORE_SHARED(y->right, z);
+ rcu_rbtree_insert_fixup(root, z, rballoc, rbfree);
+ /*
+ * Make sure to commit all _STORE_SHARED() for non-coherent caches.
+ */
+ smp_wmc();
+
+ return 0;
+}
+
+/*
+ * Transplant v into u position.
+ * Returns new copy of v.
+ */
+static struct rcu_rbtree_node *
+rcu_rbtree_transplant(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *u,
+ struct rcu_rbtree_node *v,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *vc;
+
+ vc = rballoc();
+ *vc = *v;
+
+ /* Change vc parent pointer */
+ vc->p = u->p;
+
+ /*
+ * Order stores to node copies (children/parents) before stores that
+ * will make the copies visible to the rest of the tree.
+ */
+ smp_wmb();
+
+ /* Assign upper-level pointer to vc, replacing u. */
+ if (u->p == &nil)
+ _STORE_SHARED(*root, vc);
+ else if (u == u->p->left)
+ _STORE_SHARED(u->p->left, vc);
+ else
+ _STORE_SHARED(u->p->right, vc);
+
+ /*
+ * The children pointers in vc are the same as v. We can therefore
+ * reparent v's children to vc safely.
+ */
+ _STORE_SHARED(vc->right->p, vc);
+ _STORE_SHARED(vc->left->p, vc);
+
+ defer_rcu(rbfree, v);
+ return vc;
+}
+
+static void rcu_rbtree_remove_fixup(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *x,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ while (x != *root && x->color == COLOR_BLACK) {
+ if (x == x->p->left) {
+ struct rcu_rbtree_node *w;
+
+ w = x->p->right;
+ if (w->color == COLOR_RED) {
+				w->color = COLOR_BLACK;
+ x->p->color = COLOR_RED;
+ left_rotate(root, x->p, rballoc, rbfree);
+ /* x is a left node, not copied by rotation */
+ w = x->p->right;
+ }
+ if (w->left->color == COLOR_BLACK
+ && w->right->color == COLOR_BLACK) {
+ w->color = COLOR_RED;
+ x = x->p;
+ } else {
+ if (w->right->color == COLOR_BLACK) {
+ w->left->color = COLOR_BLACK;
+ w->color = COLOR_RED;
+ right_rotate(root, w, rballoc, rbfree);
+ w = x->p->right;
+ }
+ w->color = x->p->color;
+ x->p->color = COLOR_BLACK;
+ w->right->color = COLOR_BLACK;
+ left_rotate(root, x->p, rballoc, rbfree);
+ x = *root;
+ }
+ } else {
+ struct rcu_rbtree_node *w;
+
+ w = x->p->left;
+ if (w->color == COLOR_RED) {
+				w->color = COLOR_BLACK;
+ x->p->color = COLOR_RED;
+ right_rotate(root, x->p, rballoc, rbfree);
+ /* x is a right node, not copied by rotation */
+ w = x->p->left;
+ }
+ if (w->right->color == COLOR_BLACK
+ && w->left->color == COLOR_BLACK) {
+ w->color = COLOR_RED;
+ x = x->p;
+ } else {
+ if (w->left->color == COLOR_BLACK) {
+ w->right->color = COLOR_BLACK;
+ w->color = COLOR_RED;
+ left_rotate(root, w, rballoc, rbfree);
+ w = x->p->left;
+ }
+ w->color = x->p->color;
+ x->p->color = COLOR_BLACK;
+ w->left->color = COLOR_BLACK;
+ right_rotate(root, x->p, rballoc, rbfree);
+ x = *root;
+ }
+ }
+ }
+ x->color = COLOR_BLACK;
+}
+
+/* Returns the new copy of y->right */
+static struct rcu_rbtree_node *
+rcu_rbtree_remove_nonil(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *z,
+ struct rcu_rbtree_node *y,
+ rcu_rbtree_comp comp,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *x, *xc, *yc;
+
+ x = y->right;
+
+ xc = rballoc();
+ yc = rballoc();
+ *xc = *x;
+ *yc = *y;
+
+ /* Update parent and children pointers within copies */
+	if (y->p == z) {
+		xc->p = yc;
+		/* xc replaces x as the right child of yc, within copy */
+		yc->right = xc;
+	} else {
+		/* Transplant y->right (xc) into y, within copy */
+		xc->p = y->p;
+		/* yc takes over z's right subtree */
+		yc->right = z->right;
+	}
+ /* Transplant y into z, within copy */
+ yc->p = z->p;
+
+ yc->left = z->left;
+ yc->color = z->color;
+
+ /*
+ * Order stores to node copies (children/parents) before stores that
+ * will make the copies visible to the rest of the tree.
+ */
+ smp_wmb();
+
+ /* Update external pointers */
+
+ if (y->p != z) {
+ /* Transplant y->right into y, external parent links */
+
+ /* Assign upper-level pointer to xc, replacing y. */
+ if (y->p == &nil)
+ _STORE_SHARED(*root, xc);
+ else if (y == y->p->left)
+ _STORE_SHARED(y->p->left, xc);
+ else
+ _STORE_SHARED(y->p->right, xc);
+ }
+
+ /* Transplant y into z, update external parent links */
+ if (z->p == &nil)
+ _STORE_SHARED(*root, yc);
+ else if (z == z->p->left)
+ _STORE_SHARED(z->p->left, yc);
+ else
+ _STORE_SHARED(z->p->right, yc);
+
+ /* Reparent xc's children to xc. */
+ _STORE_SHARED(xc->right->p, xc);
+ _STORE_SHARED(xc->left->p, xc);
+ /* Reparent yc's children to yc */
+ _STORE_SHARED(yc->right->p, yc);
+ _STORE_SHARED(yc->left->p, yc);
+
+ defer_rcu(rbfree, x);
+ defer_rcu(rbfree, y);
+
+ return xc;
+}
+
+int rcu_rbtree_remove(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *z,
+ rcu_rbtree_comp comp,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree)
+{
+ struct rcu_rbtree_node *x, *y;
+ unsigned int y_original_color;
+
+ y = z;
+ y_original_color = y->color;
+
+ if (z->left == &nil) {
+ x = rcu_rbtree_transplant(root, z, z->right, rballoc, rbfree);
+ } else if (z->right == &nil) {
+ x = rcu_rbtree_transplant(root, z, z->left, rballoc, rbfree);
+ } else {
+ y = rcu_rbtree_min(z->right, comp);
+ y_original_color = y->color;
+ x = rcu_rbtree_remove_nonil(root, z, y, comp, rballoc, rbfree);
+ }
+ if (y_original_color == COLOR_BLACK)
+ rcu_rbtree_remove_fixup(root, x, rballoc, rbfree);
+ /*
+ * Commit all _STORE_SHARED().
+ */
+ smp_wmc();
+
+ return 0;
+}
--- /dev/null
+#ifndef URCU_RBTREE_H
+#define URCU_RBTREE_H
+
+/*
+ * urcu-rbtree.h
+ *
+ * Userspace RCU library - Red-Black Tree
+ *
+ * Copyright (c) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Implementation of RCU-adapted data structures and operations based on the RB
+ * tree algorithms found in chapter 12 of:
+ *
+ * Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, and
+ * Clifford Stein. Introduction to Algorithms, Third Edition. The MIT
+ * Press, September 2009.
+ */
+
+#include <pthread.h>
+
+#define COLOR_BLACK 0
+#define COLOR_RED 1
+
+/*
+ * Node key comparison function.
+ * < 0 : a lower than b.
+ * > 0 : a greater than b.
+ * == 0 : a equals b.
+ */
+typedef int (*rcu_rbtree_comp)(void *a, void *b);
+
+struct rcu_rbtree_node;
+
+/*
+ * Node allocation and deletion functions.
+ */
+typedef struct rcu_rbtree_node *(*rcu_rbtree_alloc)(void);
+typedef void (*rcu_rbtree_free)(struct rcu_rbtree_node *node);
+
+struct rcu_rbtree_node {
+ /* must be set upon insertion */
+ void *key;
+
+ /* internally reserved */
+ struct rcu_rbtree_node *p, *left, *right;
+ unsigned int color:1;
+};
+
+/*
+ * The search primitive and the "prev"/"next" iteration functions must be
+ * called with the RCU read-side lock held.
+ *
+ * Insertion and removal must be protected by a mutex. At the moment,
+ * insertion and removal use defer_rcu(), so calling them with the RCU
+ * read-side lock held is prohibited.
+ */
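+
+/*
+ * Illustrative usage sketch only. tree_lock, my_comp, my_alloc, my_free,
+ * my_node and my_key are caller-provided names, not part of this API.
+ *
+ *	rcu_read_lock();
+ *	node = rcu_rbtree_search(root, my_key, my_comp);
+ *	rcu_read_unlock();
+ *
+ *	pthread_mutex_lock(&tree_lock);
+ *	my_node->key = my_key;
+ *	ret = rcu_rbtree_insert(&root, my_node, my_comp, my_alloc, my_free);
+ *	pthread_mutex_unlock(&tree_lock);
+ */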
+
+/*
+ * Node insertion. Returns 0 on success. May fail with -ENOMEM.
+ */
+int rcu_rbtree_insert(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *node,
+ rcu_rbtree_comp comp,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree);
+
+/*
+ * Remove node from tree.
+ * Must wait for a grace period after removal before performing deletion of the
+ * node.
+ * Returns 0 on success. May fail with -ENOMEM.
+ */
+int rcu_rbtree_remove(struct rcu_rbtree_node **root,
+ struct rcu_rbtree_node *node,
+ rcu_rbtree_comp comp,
+ rcu_rbtree_alloc rballoc,
+ rcu_rbtree_free rbfree);
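+
+/*
+ * Illustrative sketch only, using the same caller-provided names as above:
+ * removal is done under the mutex, and the node memory is reclaimed only
+ * after a grace period has elapsed.
+ *
+ *	pthread_mutex_lock(&tree_lock);
+ *	rcu_rbtree_remove(&root, my_node, my_comp, my_alloc, my_free);
+ *	pthread_mutex_unlock(&tree_lock);
+ *	synchronize_rcu();
+ *	my_free(my_node);
+ */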
+
+/* RCU read-side */
+
+/*
+ * Search key starting from node x. Returns NULL if not found.
+ */
+struct rcu_rbtree_node* rcu_rbtree_search(struct rcu_rbtree_node *x,
+ void *key, rcu_rbtree_comp comp);
+
+struct rcu_rbtree_node *rcu_rbtree_min(struct rcu_rbtree_node *x,
+ rcu_rbtree_comp comp);
+
+struct rcu_rbtree_node *rcu_rbtree_max(struct rcu_rbtree_node *x,
+ rcu_rbtree_comp comp);
+
+struct rcu_rbtree_node *rcu_rbtree_next(struct rcu_rbtree_node *x,
+ rcu_rbtree_comp comp);
+
+struct rcu_rbtree_node *rcu_rbtree_prev(struct rcu_rbtree_node *x,
+ rcu_rbtree_comp comp);
+
+#endif /* URCU_RBTREE_H */