urcu-bp: New "bulletproof" RCU library flavor
author Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Thu, 1 Oct 2009 20:31:23 +0000 (16:31 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Thu, 1 Oct 2009 20:31:23 +0000 (16:31 -0400)
Slower read-side/write-side, but requires none of:
urcu_init()
rcu_register_thread()
rcu_unregister_thread()

It is signal safe.

Specialized for the UST LTTng tracer port.
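
For reference, a minimal read-side sketch (illustration only; the probe
structure and function names are invented, not part of this commit). A thread
in an instrumented application can enter a read-side critical section with no
prior setup, because its first rcu_read_lock() registers it lazily:

	/* Hypothetical tracing-library read path built on urcu-bp. */
	#include <urcu-bp.h>

	struct probe { void (*func)(void *data); void *data; };
	static struct probe *active_probe;

	void tracepoint_fire(void)
	{
		struct probe *p;

		rcu_read_lock();	/* first call registers this thread automatically */
		p = rcu_dereference(active_probe);
		if (p)
			p->func(p->data);
		rcu_read_unlock();
		/* no rcu_unregister_thread() needed, even if the thread later exits */
	}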

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Makefile.inc
README
tests/Makefile.inc
tests/test_urcu_bp.c [new file with mode: 0644]
urcu-bp-static.h [new file with mode: 0644]
urcu-bp.c [new file with mode: 0644]
urcu-bp.h [new file with mode: 0644]
urcu/compiler.h

index 1df9d53d4ac67efc23d38cb478003ac48866f4c9..a27f0c31357f2449bfe015963963a46cc35d85c7 100644 (file)
@@ -6,6 +6,7 @@ DIRS=tests
 all: checkarch liburcu.so urcu.o \
        liburcu-qsbr.so urcu-qsbr.o \
        liburcu-mb.so urcu-mb.o \
+       liburcu-bp.so urcu-bp.o \
        liburcu-defer.so urcu-defer.o \
        urcu-yield.o \
        subdirs
@@ -35,6 +36,9 @@ urcu.o: urcu.c urcu.h
 urcu-mb.o: urcu.c urcu.h
        $(CC) -fPIC -DURCU_MB ${CFLAGS} -c -o $@ $(SRC_DEP)
 
+urcu-bp.o: urcu-bp.c urcu-bp.h
+       $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
+
 urcu-qsbr.o: urcu-qsbr.c urcu-qsbr.h
        $(CC) -fPIC ${CFLAGS} -c -o $@ $(SRC_DEP)
 
@@ -50,6 +54,9 @@ liburcu-qsbr.so: urcu-qsbr.o urcu-pointer.o
 liburcu-mb.so: urcu-mb.o urcu-pointer.o
        $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
 
+liburcu-bp.so: urcu-bp.o urcu-pointer.o
+       $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
+
 liburcu-defer.so: urcu-defer.o
        $(CC) ${LDFLAGS} -fPIC -shared -o $@ $<
 
@@ -63,6 +70,7 @@ subdirs:
 
 install: liburcu.so
        cp -f liburcu.so liburcu-mb.so liburcu-qsbr.so liburcu-defer.so \
+               liburcu-bp.so                                           \
                        /usr/lib/
        mkdir -p /usr/include/urcu
        cp -f urcu/arch.h urcu/arch_uatomic.h urcu/compiler.h           \
@@ -70,6 +78,7 @@ install: liburcu.so
                        /usr/include/urcu/
        cp -f urcu.h urcu-static.h                                      \
                urcu-qsbr.h urcu-qsbr-static.h                          \
+               urcu-bp.h urcu-bp-static.h                              \
                urcu-defer.h urcu-defer-static.h                        \
                urcu-pointer.h urcu-pointer-static.h                    \
                        /usr/include/
diff --git a/README b/README
index e5c04b179cae49d202a49809d09a76f714ada711..0549f241a5ec71b71287085d3fe199ade332917f 100644 (file)
--- a/README
+++ b/README
@@ -50,6 +50,17 @@ Usage of liburcu-qsbr
          the threads are not active. It provides the fastest read-side at the
          expense of more intrusiveness in the application code.
 
+Usage of liburcu-bp
+
+       * #include <urcu-bp.h>
+       * Link with "-lurcu-bp".
+       * The BP library flavor stands for "bulletproof". It is specifically
+         designed to let tracing libraries hook into applications without
+         requiring those applications to be modified. urcu_init(),
+         rcu_register_thread() and rcu_unregister_thread() all become no-ops.
+         The required state is dealt with internally by the library, at the
+         expense of read-side and write-side performance.
+
 Initialization
 
        Each thread that has reader critical sections (that uses
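
To complement the new README entry above, a hedged write-side sketch
(illustration only; the config structure and update function are invented): an
updater publishes a new version with rcu_assign_pointer(), waits for a grace
period with synchronize_rcu(), then reclaims the old copy. Link with
"-lurcu-bp". Concurrent updaters are assumed to be serialized by the caller's
own locking, as the test program does with rcu_copy_mutex.

	#include <stdlib.h>
	#include <urcu-bp.h>

	struct config { int value; };
	static struct config *live_config;

	void config_update(int value)
	{
		struct config *new_cfg, *old_cfg;

		new_cfg = malloc(sizeof(*new_cfg));
		new_cfg->value = value;
		old_cfg = live_config;
		rcu_assign_pointer(live_config, new_cfg);	/* publish new version */
		synchronize_rcu();		/* wait for pre-existing readers */
		free(old_cfg);			/* now safe to reclaim */
	}
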
index 92d619e2609bbb82fb3465a384d2aa0b3c824ec5..9fa9bfb0bac74dab1011c92152808437c43e2547 100644 (file)
@@ -14,6 +14,7 @@ CFLAGS+=-I${LIBDIR}
 URCU_SIGNAL=${LIBDIR}/urcu.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
 URCU_SIGNAL_YIELD=${LIBDIR}/urcu-yield.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
 URCU_MB=${LIBDIR}/urcu-mb.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
+URCU_BP=${LIBDIR}/urcu-bp.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
 URCU_QSBR=${LIBDIR}/urcu-qsbr.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu-qsbr.h
 URCU_MB_DEFER=${LIBDIR}/urcu-mb.o ${LIBDIR}/urcu-defer.o ${LIBDIR}/urcu-pointer.o ${LIBDIR}/urcu.h
 
@@ -24,7 +25,8 @@ all: test_urcu test_urcu_dynamic_link test_urcu_timing \
        urcutorture-yield test_mutex test_looplen test_urcu_gc \
        test_urcu_gc_mb test_qsbr_gc test_qsbr_lgc test_urcu_lgc \
        test_urcu_lgc_mb test_qsbr_dynamic_link test_urcu_mb_defer \
-       test_uatomic test_urcu_assign test_urcu_assign_dynamic_link
+       test_uatomic test_urcu_assign test_urcu_assign_dynamic_link \
+       test_urcu_bp test_urcu_bp_dynamic_link
 
 api.h: ${APIHEADER}
        cp -f ${APIHEADER} api.h
@@ -88,6 +90,13 @@ test_urcu_dynamic_link: test_urcu.c ${URCU_SIGNAL}
        $(CC) ${CFLAGS} -DDYNAMIC_LINK_TEST $(LDFLAGS) -o $@ $(SRC_DEP)
 
 
+test_urcu_bp: test_urcu_bp.c ${URCU_BP}
+       $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
+
+test_urcu_bp_dynamic_link: test_urcu_bp.c ${URCU_BP}
+       $(CC) -DDYNAMIC_LINK_TEST ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
+
+
 test_urcu_yield: test_urcu.c ${URCU_SIGNAL_YIELD}
        $(CC) -DDEBUG_YIELD ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
 
diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c
new file mode 100644 (file)
index 0000000..8987332
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ * test_urcu_bp.c
+ *
+ * Userspace RCU library - test program
+ *
+ * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sched.h>
+
+#include <urcu/arch.h>
+
+/* hardcoded number of CPUs */
+#define NR_CPUS 16384
+
+#if defined(_syscall0)
+_syscall0(pid_t, gettid)
+#elif defined(__NR_gettid)
+static inline pid_t gettid(void)
+{
+       return syscall(__NR_gettid);
+}
+#else
+#warning "use pid as tid"
+static inline pid_t gettid(void)
+{
+       return getpid();
+}
+#endif
+
+#ifndef DYNAMIC_LINK_TEST
+#define _LGPL_SOURCE
+#else
+#define debug_yield_read()
+#endif
+#include <urcu-bp.h>
+
+struct test_array {
+       int a;
+};
+
+static volatile int test_go, test_stop;
+
+static unsigned long wdelay;
+
+static struct test_array *test_rcu_pointer;
+
+static unsigned long duration;
+
+/* read-side C.S. duration, in loops */
+static unsigned long rduration;
+
+static inline void loop_sleep(unsigned long l)
+{
+       while(l-- != 0)
+               cpu_relax();
+}
+
+static int verbose_mode;
+
+#define printf_verbose(fmt, args...)           \
+       do {                                    \
+               if (verbose_mode)               \
+                       printf(fmt, args);      \
+       } while (0)
+
+static unsigned int cpu_affinities[NR_CPUS];
+static unsigned int next_aff = 0;
+static int use_affinity = 0;
+
+pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void set_affinity(void)
+{
+       cpu_set_t mask;
+       int cpu;
+       int ret;
+
+       if (!use_affinity)
+               return;
+
+       ret = pthread_mutex_lock(&affinity_mutex);
+       if (ret) {
+               perror("Error in pthread mutex lock");
+               exit(-1);
+       }
+       cpu = cpu_affinities[next_aff++];
+       ret = pthread_mutex_unlock(&affinity_mutex);
+       if (ret) {
+               perror("Error in pthread mutex unlock");
+               exit(-1);
+       }
+       CPU_ZERO(&mask);
+       CPU_SET(cpu, &mask);
+       sched_setaffinity(0, sizeof(mask), &mask);
+}
+
+/*
+ * returns 0 if test should end.
+ */
+static int test_duration_write(void)
+{
+       return !test_stop;
+}
+
+static int test_duration_read(void)
+{
+       return !test_stop;
+}
+
+static unsigned long long __thread nr_writes;
+static unsigned long long __thread nr_reads;
+
+static unsigned int nr_readers;
+static unsigned int nr_writers;
+
+pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void rcu_copy_mutex_lock(void)
+{
+       int ret;
+       ret = pthread_mutex_lock(&rcu_copy_mutex);
+       if (ret) {
+               perror("Error in pthread mutex lock");
+               exit(-1);
+       }
+}
+
+void rcu_copy_mutex_unlock(void)
+{
+       int ret;
+
+       ret = pthread_mutex_unlock(&rcu_copy_mutex);
+       if (ret) {
+               perror("Error in pthread mutex unlock");
+               exit(-1);
+       }
+}
+
+/*
+ * malloc/free are reusing memory areas too quickly, which does not let us
+ * test races appropriately. Use a large circular array for allocations.
+ * ARRAY_SIZE is larger than nr_writers, which ensures we never run over our tail.
+ */
+#define ARRAY_SIZE (1048576 * nr_writers)
+#define ARRAY_POISON 0xDEADBEEF
+static int array_index;
+static struct test_array *test_array;
+
+static struct test_array *test_array_alloc(void)
+{
+       struct test_array *ret;
+       int index;
+
+       rcu_copy_mutex_lock();
+       index = array_index % ARRAY_SIZE;
+       assert(test_array[index].a == ARRAY_POISON ||
+               test_array[index].a == 0);
+       ret = &test_array[index];
+       array_index++;
+       if (array_index == ARRAY_SIZE)
+               array_index = 0;
+       rcu_copy_mutex_unlock();
+       return ret;
+}
+
+static void test_array_free(struct test_array *ptr)
+{
+       if (!ptr)
+               return;
+       rcu_copy_mutex_lock();
+       ptr->a = ARRAY_POISON;
+       rcu_copy_mutex_unlock();
+}
+
+void *thr_reader(void *_count)
+{
+       unsigned long long *count = _count;
+       struct test_array *local_ptr;
+
+       printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+                       "reader", pthread_self(), (unsigned long)gettid());
+
+       set_affinity();
+
+       rcu_register_thread();
+
+       while (!test_go)
+       {
+       }
+       smp_mb();
+
+       for (;;) {
+               rcu_read_lock();
+               local_ptr = rcu_dereference(test_rcu_pointer);
+               debug_yield_read();
+               if (local_ptr)
+                       assert(local_ptr->a == 8);
+               if (unlikely(rduration))
+                       loop_sleep(rduration);
+               rcu_read_unlock();
+               nr_reads++;
+               if (unlikely(!test_duration_read()))
+                       break;
+       }
+
+       rcu_unregister_thread();
+
+       *count = nr_reads;
+       printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+                       "reader", pthread_self(), (unsigned long)gettid());
+       return ((void*)1);
+
+}
+
+void *thr_writer(void *_count)
+{
+       unsigned long long *count = _count;
+       struct test_array *new, *old;
+
+       printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
+                       "writer", pthread_self(), (unsigned long)gettid());
+
+       set_affinity();
+
+       while (!test_go)
+       {
+       }
+       smp_mb();
+
+       for (;;) {
+               new = test_array_alloc();
+               new->a = 8;
+               old = rcu_publish_content(&test_rcu_pointer, new);
+               if (old)
+                       old->a = 0;
+               test_array_free(old);
+               nr_writes++;
+               if (unlikely(!test_duration_write()))
+                       break;
+               if (unlikely(wdelay))
+                       loop_sleep(wdelay);
+       }
+
+       printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
+                       "writer", pthread_self(), (unsigned long)gettid());
+       *count = nr_writes;
+       return ((void*)2);
+}
+
+void show_usage(int argc, char **argv)
+{
+       printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
+#ifdef DEBUG_YIELD
+       printf(" [-r] [-w] (yield reader and/or writer)");
+#endif
+       printf(" [-d delay] (writer period (us))");
+       printf(" [-c duration] (reader C.S. duration (in loops))");
+       printf(" [-v] (verbose output)");
+       printf(" [-a cpu#] [-a cpu#]... (affinity)");
+       printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+       int err;
+       pthread_t *tid_reader, *tid_writer;
+       void *tret;
+       unsigned long long *count_reader, *count_writer;
+       unsigned long long tot_reads = 0, tot_writes = 0;
+       int i, a;
+
+       if (argc < 4) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       err = sscanf(argv[1], "%u", &nr_readers);
+       if (err != 1) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       err = sscanf(argv[2], "%u", &nr_writers);
+       if (err != 1) {
+               show_usage(argc, argv);
+               return -1;
+       }
+       
+       err = sscanf(argv[3], "%lu", &duration);
+       if (err != 1) {
+               show_usage(argc, argv);
+               return -1;
+       }
+
+       for (i = 4; i < argc; i++) {
+               if (argv[i][0] != '-')
+                       continue;
+               switch (argv[i][1]) {
+#ifdef DEBUG_YIELD
+               case 'r':
+                       yield_active |= YIELD_READ;
+                       break;
+               case 'w':
+                       yield_active |= YIELD_WRITE;
+                       break;
+#endif
+               case 'a':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       a = atoi(argv[++i]);
+                       cpu_affinities[next_aff++] = a;
+                       use_affinity = 1;
+                       printf_verbose("Adding CPU %d affinity\n", a);
+                       break;
+               case 'c':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       rduration = atol(argv[++i]);
+                       break;
+               case 'd':
+                       if (argc < i + 2) {
+                               show_usage(argc, argv);
+                               return -1;
+                       }
+                       wdelay = atol(argv[++i]);
+                       break;
+               case 'v':
+                       verbose_mode = 1;
+                       break;
+               }
+       }
+
+       printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
+               duration, nr_readers, nr_writers);
+       printf_verbose("Writer delay : %lu loops.\n", wdelay);
+       printf_verbose("Reader duration : %lu loops.\n", rduration);
+       printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
+                       "main", pthread_self(), (unsigned long)gettid());
+
+       test_array = malloc(sizeof(*test_array) * ARRAY_SIZE);
+       tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
+       tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
+       count_reader = malloc(sizeof(*count_reader) * nr_readers);
+       count_writer = malloc(sizeof(*count_writer) * nr_writers);
+
+       next_aff = 0;
+
+       for (i = 0; i < nr_readers; i++) {
+               err = pthread_create(&tid_reader[i], NULL, thr_reader,
+                                    &count_reader[i]);
+               if (err != 0)
+                       exit(1);
+       }
+       for (i = 0; i < nr_writers; i++) {
+               err = pthread_create(&tid_writer[i], NULL, thr_writer,
+                                    &count_writer[i]);
+               if (err != 0)
+                       exit(1);
+       }
+
+       smp_mb();
+
+       test_go = 1;
+
+       sleep(duration);
+
+       test_stop = 1;
+
+       for (i = 0; i < nr_readers; i++) {
+               err = pthread_join(tid_reader[i], &tret);
+               if (err != 0)
+                       exit(1);
+               tot_reads += count_reader[i];
+       }
+       for (i = 0; i < nr_writers; i++) {
+               err = pthread_join(tid_writer[i], &tret);
+               if (err != 0)
+                       exit(1);
+               tot_writes += count_writer[i];
+       }
+       
+       printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
+              tot_writes);
+       printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu "
+               "nr_writers %3u "
+               "wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu\n",
+               argv[0], duration, nr_readers, rduration,
+               nr_writers, wdelay, tot_reads, tot_writes,
+               tot_reads + tot_writes);
+       test_array_free(test_rcu_pointer);
+       free(test_array);
+       free(tid_reader);
+       free(tid_writer);
+       free(count_reader);
+       free(count_writer);
+       return 0;
+}
diff --git a/urcu-bp-static.h b/urcu-bp-static.h
new file mode 100644 (file)
index 0000000..428c53f
--- /dev/null
@@ -0,0 +1,197 @@
+#ifndef _URCU_BP_STATIC_H
+#define _URCU_BP_STATIC_H
+
+/*
+ * urcu-bp-static.h
+ *
+ * Userspace RCU header.
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-bp.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <syscall.h>
+#include <unistd.h>
+
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include <urcu/arch_uatomic.h>
+#include <urcu/list.h>
+
+/*
+ * This code section can only be included in LGPL 2.1 compatible source code.
+ * See below for the function call wrappers which can be used in code meant to
+ * be only linked with the Userspace RCU library. This comes with a small
+ * performance degradation on the read-side due to the added function calls.
+ * This is required to permit relinking with newer versions of the library.
+ */
+
+/*
+ * Active attempts to check for reader Q.S. before calling sleep().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
+#ifdef DEBUG_RCU
+#define rcu_assert(args...)    assert(args)
+#else
+#define rcu_assert(args...)
+#endif
+
+#ifdef DEBUG_YIELD
+#include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#define YIELD_READ     (1 << 0)
+#define YIELD_WRITE    (1 << 1)
+
+/*
+ * Updates without URCU_MB are much slower. Account for this in
+ * the delay.
+ */
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+       if (yield_active & YIELD_READ)
+               if (rand_r(&rand_yield) & 0x1)
+                       usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_write(void)
+{
+       if (yield_active & YIELD_WRITE)
+               if (rand_r(&rand_yield) & 0x1)
+                       usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_init(void)
+{
+       rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
+/*
+ * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
+ * a full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
+ */
+#define RCU_GP_COUNT           (1UL << 0)
+/* Use the amount of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_BIT         (1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK   (RCU_GP_CTR_BIT - 1)
+
+/*
+ * Used internally by _rcu_read_lock.
+ */
+extern void rcu_bp_register(void);
+
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using a word-sized integer rather than a char to eliminate false register
+ * dependencies causing stalls on some architectures.
+ */
+extern long urcu_gp_ctr;
+
+struct urcu_reader {
+       /* Data used by both reader and synchronize_rcu() */
+       long ctr;
+       /* Data used for registry */
+       struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
+       pthread_t tid;
+       int alloc;      /* registry entry allocated */
+};
+
+/*
+ * The bulletproof version keeps a pointer to a registry entry that is not part
+ * of the TLS. This adds a pointer dereference on the read-side, but removes
+ * the need to unregister the reader thread.
+ */
+extern struct urcu_reader __thread *urcu_reader;
+
+static inline int rcu_old_gp_ongoing(long *value)
+{
+       long v;
+
+       if (value == NULL)
+               return 0;
+       /*
+        * Make sure both tests below are done on the same version of *value
+        * to ensure consistency.
+        */
+       v = LOAD_SHARED(*value);
+       return (v & RCU_GP_CTR_NEST_MASK) &&
+                ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
+}
+
+static inline void _rcu_read_lock(void)
+{
+       long tmp;
+
+       /* Check if registered */
+       if (unlikely(!urcu_reader))
+               rcu_bp_register();
+
+       tmp = urcu_reader->ctr;
+       /* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+       if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+               _STORE_SHARED(urcu_reader->ctr, _LOAD_SHARED(urcu_gp_ctr));
+               /*
+                * Set active readers count for outermost nesting level before
+                * accessing the pointer.
+                */
+               smp_mb();
+       } else {
+               _STORE_SHARED(urcu_reader->ctr, tmp + RCU_GP_COUNT);
+       }
+}
+
+static inline void _rcu_read_unlock(void)
+{
+       /*
+        * Finish using RCU data structures before decrementing the nesting count.
+        */
+       smp_mb();
+       _STORE_SHARED(urcu_reader->ctr, urcu_reader->ctr - RCU_GP_COUNT);
+}
+
+#endif /* _URCU_BP_STATIC_H */
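
As an aside, here is a standalone sketch (not part of this patch) that
exercises the predicate above on plain values: the lower half of the counter
carries the nesting count, the upper-half bit carries the grace-period parity,
and rcu_old_gp_ongoing() only flags readers whose snapshot predates the latest
parity flip.

	/* Standalone illustration mirroring the RCU_GP_* layout defined above. */
	#include <stdio.h>

	#define RCU_GP_COUNT		(1UL << 0)
	#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

	/* Same test as rcu_old_gp_ongoing(), applied to plain values. */
	static int old_gp_ongoing(long reader_ctr, long gp_ctr)
	{
		return (reader_ctr & RCU_GP_CTR_NEST_MASK) &&
			((reader_ctr ^ gp_ctr) & RCU_GP_CTR_BIT);
	}

	int main(void)
	{
		long gp = RCU_GP_COUNT;		/* global counter, parity 0 */
		long reader = gp;		/* snapshot taken at rcu_read_lock() */

		gp ^= RCU_GP_CTR_BIT;		/* writer flips the parity bit */

		printf("%d\n", old_gp_ongoing(reader, gp));	/* 1: pre-flip reader, writer waits */
		printf("%d\n", old_gp_ongoing(0, gp));		/* 0: not in a critical section */
		printf("%d\n", old_gp_ongoing(gp, gp));		/* 0: reader entered after the flip */
		return 0;
	}
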
diff --git a/urcu-bp.c b/urcu-bp.c
new file mode 100644 (file)
index 0000000..0fdf1f4
--- /dev/null
+++ b/urcu-bp.c
@@ -0,0 +1,362 @@
+/*
+ * urcu-bp.c
+ *
+ * Userspace RCU library, "bulletproof" version.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <poll.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include "urcu-bp-static.h"
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#include "urcu-bp.h"
+
+/* Sleep delay in us */
+#define RCU_SLEEP_DELAY                1000
+/* Initial registry arena size: room for 16 reader entries */
+#define ARENA_INIT_ALLOC       (16 * sizeof(struct urcu_reader))
+
+void __attribute__((destructor)) urcu_bp_exit(void);
+
+static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#ifdef DEBUG_YIELD
+unsigned int yield_active;
+unsigned int __thread rand_yield;
+#endif
+
+/*
+ * Global grace period counter.
+ * Contains the current RCU_GP_CTR_BIT.
+ * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken. Read by both writer and readers.
+ */
+long urcu_gp_ctr = RCU_GP_COUNT;
+
+/*
+ * Pointer to registry elements. Written to only by each individual reader. Read
+ * by both the reader and the writers.
+ */
+struct urcu_reader __thread *urcu_reader;
+
+static LIST_HEAD(registry);
+
+struct registry_arena {
+       void *p;
+       size_t len;
+       size_t used;
+};
+
+static struct registry_arena registry_arena;
+
+static void rcu_gc_registry(void);
+
+static void internal_urcu_lock(void)
+{
+       int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
+       ret = pthread_mutex_lock(&urcu_mutex);
+       if (ret) {
+               perror("Error in pthread mutex lock");
+               exit(-1);
+       }
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
+       while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+               if (ret != EBUSY && ret != EINTR) {
+                       printf("ret = %d, errno = %d\n", ret, errno);
+                       perror("Error in pthread mutex lock");
+                       exit(-1);
+               }
+               poll(NULL,0,10);
+       }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
+}
+
+static void internal_urcu_unlock(void)
+{
+       int ret;
+
+       ret = pthread_mutex_unlock(&urcu_mutex);
+       if (ret) {
+               perror("Error in pthread mutex unlock");
+               exit(-1);
+       }
+}
+
+/*
+ * called with urcu_mutex held.
+ */
+static void switch_next_urcu_qparity(void)
+{
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
+}
+
+void wait_for_quiescent_state(void)
+{
+       LIST_HEAD(qsreaders);
+       int wait_loops = 0;
+       struct urcu_reader *index, *tmp;
+
+       if (list_empty(&registry))
+               return;
+       /*
+        * Wait for each thread's urcu_reader.ctr count to become 0.
+        */
+       for (;;) {
+               wait_loops++;
+               list_for_each_entry_safe(index, tmp, &registry, head) {
+                       if (!rcu_old_gp_ongoing(&index->ctr))
+                               list_move(&index->head, &qsreaders);
+               }
+
+               if (list_empty(&registry)) {
+                       break;
+               } else {
+                       if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+                               usleep(RCU_SLEEP_DELAY);
+                       else
+                               cpu_relax();
+               }
+       }
+       /* put back the reader list in the registry */
+       list_splice(&qsreaders, &registry);
+}
+
+void synchronize_rcu(void)
+{
+       sigset_t newmask, oldmask;
+       int ret;
+
+       /* Hold the internal mutex with all signals blocked. */
+       ret = sigfillset(&newmask);
+       assert(!ret);
+       ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+       assert(!ret);
+
+       internal_urcu_lock();
+
+       /* Remove old registry elements */
+       rcu_gc_registry();
+
+       /* All threads should read qparity before accessing data structure
+        * where new ptr points to. Must be done within internal_urcu_lock
+        * because it iterates on reader threads.*/
+       /* Write new ptr before changing the qparity */
+       smp_mb();
+
+       switch_next_urcu_qparity();     /* 0 -> 1 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 0 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Adding a smp_mb() which is _not_ formally required, but makes the
+        * model easier to understand. It does not have a big performance impact
+        * anyway, given this is the write-side.
+        */
+       smp_mb();
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 0 */
+
+       /*
+        * Must finish waiting for quiescent state for parity 0 before
+        * committing qparity update to memory. Failure to do so could result in
+        * the writer waiting forever while new readers are always accessing
+        * data (no progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Adding a smp_mb() which is _not_ formally required, but makes the
+        * model easier to understand. It does not have a big performance impact
+        * anyway, given this is the write-side.
+        */
+       smp_mb();
+
+       switch_next_urcu_qparity();     /* 1 -> 0 */
+
+       /*
+        * Must commit qparity update to memory before waiting for parity
+        * 1 quiescent state. Failure to do so could result in the writer
+        * waiting forever while new readers are always accessing data (no
+        * progress).
+        * Ensured by STORE_SHARED and LOAD_SHARED.
+        */
+
+       /*
+        * Adding a smp_mb() which is _not_ formally required, but makes the
+        * model easier to understand. It does not have a big performance impact
+        * anyway, given this is the write-side.
+        */
+       smp_mb();
+
+       /*
+        * Wait for previous parity to be empty of readers.
+        */
+       wait_for_quiescent_state();     /* Wait readers in parity 1 */
+
+       /* Finish waiting for reader threads before letting the old ptr being
+        * freed. Must be done within internal_urcu_lock because it iterates on
+        * reader threads. */
+       smp_mb();
+
+       internal_urcu_unlock();
+       ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+       assert(!ret);
+}
+
+/*
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+void rcu_read_lock(void)
+{
+       _rcu_read_lock();
+}
+
+void rcu_read_unlock(void)
+{
+       _rcu_read_unlock();
+}
+
+/*
+ * only grow for now.
+ */
+static void resize_arena(struct registry_arena *arena, size_t len)
+{
+       void *new_arena;
+
+       new_arena = mmap(arena->p, len,
+                        PROT_READ | PROT_WRITE,
+                        MAP_ANONYMOUS | MAP_PRIVATE,
+                        -1, 0);
+       assert(new_arena != MAP_FAILED);
+       /*
+        * re-used the same region?
+        */
+       if (new_arena != arena->p) {
+               memcpy(new_arena, arena->p, arena->len);
+               bzero(new_arena + arena->len, len - arena->len);
+               arena->p = new_arena;
+       }
+       /* Keep the length up to date so callers see the newly available room. */
+       arena->len = len;
+}
+
+/* Called with signals off and mutex locked */
+static void add_thread(void)
+{
+       struct urcu_reader *urcu_reader_reg;
+
+       if (registry_arena.len
+           < registry_arena.used + sizeof(struct urcu_reader))
+               resize_arena(&registry_arena,
+               max(registry_arena.len << 1, ARENA_INIT_ALLOC));
+       /*
+        * Find a free spot.
+        */
+       for (urcu_reader_reg = registry_arena.p;
+            (void *)urcu_reader_reg < registry_arena.p + registry_arena.len;
+            urcu_reader_reg++) {
+               if (!urcu_reader_reg->alloc)
+                       break;
+       }
+       urcu_reader_reg->alloc = 1;
+       registry_arena.used += sizeof(struct urcu_reader);
+
+       /* Add to registry */
+       urcu_reader_reg->tid = pthread_self();
+       assert(urcu_reader_reg->ctr == 0);
+       list_add(&urcu_reader_reg->head, &registry);
+       urcu_reader = urcu_reader_reg;
+}
+
+/* Called with signals off and mutex locked */
+static void rcu_gc_registry(void)
+{
+       struct urcu_reader *urcu_reader_reg;
+       pthread_t tid;
+       int ret;
+
+       for (urcu_reader_reg = registry_arena.p;
+            (void *)urcu_reader_reg < registry_arena.p + registry_arena.len;
+            urcu_reader_reg++) {
+               if (!urcu_reader_reg->alloc)
+                       continue;
+               tid = urcu_reader_reg->tid;
+               ret = pthread_kill(tid, 0);
+               assert(ret != EINVAL);
+               if (ret == ESRCH) {
+                       list_del(&urcu_reader_reg->head);
+                       urcu_reader_reg->alloc = 0;
+                       registry_arena.used -= sizeof(struct urcu_reader);
+               }
+       }
+}
+
+/* Disable signals, take mutex, add to registry */
+void rcu_bp_register(void)
+{
+       sigset_t newmask, oldmask;
+       int ret;
+
+       ret = sigfillset(&newmask);
+       assert(!ret);
+       ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+       assert(!ret);
+
+       /*
+        * Check if a signal handler concurrently registered our thread
+        * since the check in rcu_read_lock().
+        */
+       if (urcu_reader)
+               goto end;
+
+       internal_urcu_lock();
+       add_thread();
+       internal_urcu_unlock();
+end:
+       ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+       assert(!ret);
+}
+
+void urcu_bp_exit(void)
+{
+       munmap(registry_arena.p, registry_arena.len);
+}
diff --git a/urcu-bp.h b/urcu-bp.h
new file mode 100644 (file)
index 0000000..d42a2cb
--- /dev/null
+++ b/urcu-bp.h
@@ -0,0 +1,100 @@
+#ifndef _URCU_BP_H
+#define _URCU_BP_H
+
+/*
+ * urcu-bp.h
+ *
+ * Userspace RCU header, "bulletproof" version.
+ *
+ * Slower RCU read-side adapted for tracing library. Does not require thread
+ * registration nor unregistration. Also signal-safe.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * LGPL-compatible code should include this header with:
+ *
+ * #define _LGPL_SOURCE
+ * #include <urcu-bp.h>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+
+/*
+ * See urcu-pointer.h and urcu-pointer-static.h for pointer publication headers.
+ */
+#include <urcu-pointer.h>
+
+/*
+ * Important !
+ *
+ * In this "bulletproof" flavor, reader threads do not need to be registered
+ * explicitly: the first rcu_read_lock() issued by a thread registers it
+ * automatically. rcu_register_thread() and rcu_unregister_thread() are kept
+ * as no-ops (see below) for API compatibility with the other flavors.
+ */
+
+#ifdef _LGPL_SOURCE
+
+#include <urcu-bp-static.h>
+
+/*
+ * Mappings for static use of the userspace RCU library.
+ * Should only be used in LGPL-compatible code.
+ */
+
+/*
+ * rcu_read_lock()
+ * rcu_read_unlock()
+ *
+ * Mark the beginning and end of a read-side critical section.
+ */
+#define rcu_read_lock()                _rcu_read_lock()
+#define rcu_read_unlock()      _rcu_read_unlock()
+
+#else /* !_LGPL_SOURCE */
+
+/*
+ * Library wrappers to be used by non-LGPL compatible source code.
+ * See the LGPL-only urcu-bp-static.h for documentation.
+ */
+
+extern void rcu_read_lock(void);
+extern void rcu_read_unlock(void);
+
+#endif /* !_LGPL_SOURCE */
+
+extern void synchronize_rcu(void);
+
+/*
+ * In the bulletproof version, the following functions are no-ops.
+ */
+static inline void rcu_register_thread(void)
+{
+}
+
+static inline void rcu_unregister_thread(void)
+{
+}
+
+static inline void urcu_init(void)
+{
+}
+
+#endif /* _URCU_BP_H */
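
A brief compatibility sketch (hedged; the thread function below is
hypothetical): code written against the regular urcu registration API compiles
and behaves the same when switched to this flavor, since the calls above are
no-ops. Without _LGPL_SOURCE, rcu_read_lock()/rcu_read_unlock() resolve to the
wrapper functions exported by liburcu-bp; with it, they map to the inline fast
path from urcu-bp-static.h.

	#include <pthread.h>
	#include <urcu-bp.h>

	void *reader_thread(void *arg)
	{
		(void)arg;
		rcu_register_thread();		/* no-op with urcu-bp, still legal */

		rcu_read_lock();
		/* ... rcu_dereference() RCU-protected pointers here ... */
		rcu_read_unlock();

		rcu_unregister_thread();	/* no-op as well */
		return NULL;
	}
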
index 4dd89ea212285e664e2cd3c35e796a9af1d73d1e..975e1eadc1e9cfddd8c21d34f129921808ef7863 100644 (file)
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&x)
 
+#ifndef max
+#define max(a,b) ((a)>(b)?(a):(b))
+#endif
+
+#ifndef min
+#define min(a,b) ((a)<(b)?(a):(b))
+#endif
+
 #endif /* _URCU_COMPILER_H */
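
One caveat about the max()/min() helpers added above (the snippet is an
illustration, assuming the urcu headers are on the include path): they are
plain textual macros, so an argument with side effects is evaluated more than
once.

	#include <stdio.h>
	#include <urcu/compiler.h>

	int main(void)
	{
		int i = 1;
		int m = max(i++, 0);	/* expands to ((i++)>(0)?(i++):(0)) */

		printf("m=%d i=%d\n", m, i);	/* prints m=2 i=3, not m=1 i=2 */
		return 0;
	}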