From: Mathieu Desnoyers
Date: Mon, 9 Feb 2009 06:31:05 +0000 (-0500)
Subject: Add rcutorture
X-Git-Tag: v0.1~310
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=8a953620d565793dcb141e673fa907cdae0a9363;p=urcu.git

Add rcutorture

Add a modified version of rcutorture.h and api.h. Used by urcutorture.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/api.h b/api.h
new file mode 100644
index 0000000..230c11d
--- /dev/null
+++ b/api.h
@@ -0,0 +1,1430 @@
+/* MECHANICALLY GENERATED, DO NOT EDIT!!! */
+
+/*
+ * common.h: Common Linux kernel-isms.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; but version 2 of the License only due
+ * to code included from the Linux kernel.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2006 Paul E. McKenney, IBM.
+ *
+ * Much code taken from the Linux kernel. For such code, the option
+ * to redistribute under later versions of GPL might not be available.
+ */
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+
+
+/*
+ * arch-i386.h: Expose x86 atomic instructions. 80486 and better only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, but version 2 only due to inclusion
+ * of Linux-kernel code.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2006 Paul E. McKenney, IBM.
+ *
+ * Much code taken from the Linux kernel. For such code, the option
+ * to redistribute under later versions of GPL might not be available.
+ */
+
+/*
+ * Machine parameters.
+ */
+
+#define CONFIG_SMP
+
+#define CACHE_LINE_SIZE 64
+#define ____cacheline_internodealigned_in_smp \
+	__attribute__((__aligned__(1 << 6)))
+
+#define LOCK_PREFIX "lock ; "
+
+/*
+ * Atomic data structure, initialization, and access.
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)  { (i) }
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v, i)	(((v)->counter) = (i))
+
+/*
+ * Atomic operations.
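+ *
+ * Editorial usage sketch of the interface defined above and below (the
+ * names "refcnt" and "release_obj" are hypothetical, not part of this
+ * file):
+ *
+ *	atomic_t refcnt = ATOMIC_INIT(1);
+ *
+ *	__atomic_inc(&refcnt);			take a second reference
+ *	atomic_dec(&refcnt);			drop it again
+ *	if (atomic_dec_and_test(&refcnt))	drop the initial reference
+ *		release_obj();			last user does the cleanup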
+ */ + +/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v. + */ +static __inline__ void atomic_add(int i, atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "addl %1,%0" + :"+m" (v->counter) + :"ir" (i)); +} + +/** + * atomic_sub - subtract the atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. + */ +static __inline__ void atomic_sub(int i, atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "subl %1,%0" + :"+m" (v->counter) + :"ir" (i)); +} + +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "subl %2,%0; sete %1" + :"+m" (v->counter), "=qm" (c) + :"ir" (i) : "memory"); + return c; +} + +/** + * atomic_inc - increment atomic variable + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1. + */ +static __inline__ void __atomic_inc(atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "incl %0" + :"+m" (v->counter)); +} + +/** + * atomic_dec - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. + */ +static __inline__ void atomic_dec(atomic_t *v) +{ + __asm__ __volatile__( + LOCK_PREFIX "decl %0" + :"+m" (v->counter)); +} + +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __inline__ int atomic_dec_and_test(atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "decl %0; sete %1" + :"+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __inline__ int atomic_inc_and_test(atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "incl %0; sete %1" + :"+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_add_negative - add and test if negative + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. 
+ */ +static __inline__ int atomic_add_negative(int i, atomic_t *v) +{ + unsigned char c; + + __asm__ __volatile__( + LOCK_PREFIX "addl %2,%0; sets %1" + :"+m" (v->counter), "=qm" (c) + :"ir" (i) : "memory"); + return c; +} + +/** + * atomic_add_return - add and return + * @v: pointer of type atomic_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns @i + @v + */ +static __inline__ int atomic_add_return(int i, atomic_t *v) +{ + int __i; + + __i = i; + __asm__ __volatile__( + LOCK_PREFIX "xaddl %0, %1;" + :"=r"(i) + :"m"(v->counter), "0"(i)); + return i + __i; +} + +static __inline__ int atomic_sub_return(int i, atomic_t *v) +{ + return atomic_add_return(-i,v); +} + +static inline unsigned int +cmpxchg(volatile long *ptr, long oldval, long newval) +{ + unsigned long retval; + + asm("# cmpxchg\n" + "lock; cmpxchgl %4,(%2)\n" + "# end atomic_cmpxchg4" + : "=a" (retval), "=m" (*ptr) + : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr) + : "cc"); + return (retval); +} + +#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ + c = old; \ + } \ + c != (u); \ +}) +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +#define atomic_inc_return(v) (atomic_add_return(1,v)) +#define atomic_dec_return(v) (atomic_sub_return(1,v)) + +/* These are x86-specific, used by some header files */ +#define atomic_clear_mask(mask, addr) \ +__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ +: : "r" (~(mask)),"m" (*addr) : "memory") + +#define atomic_set_mask(mask, addr) \ +__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ +: : "r" (mask),"m" (*(addr)) : "memory") + +/* Atomic operations are already serializing on x86 */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#define smp_mb() \ +__asm__ __volatile__("mfence" : : : "memory") +/* __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") */ + + +/* + * Generate 64-bit timestamp. + */ + +static unsigned long long get_timestamp(void) +{ + unsigned int __a,__d; + + __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); + return ((long long)__a) | (((long long)__d)<<32); +} + +/* + * api_pthreads.h: API mapping to pthreads environment. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. However, please note that much + * of the code in this file derives from the Linux kernel, and that such + * code may not be available except under GPLv2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2006 Paul E. McKenney, IBM.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/types.h>
+#define __USE_GNU
+#include <pthread.h>
+#include <sched.h>
+#include <sys/param.h>
+/* #include "atomic.h" */
+
+/*
+ * Compiler magic.
+ */
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#define container_of(ptr, type, member) ({			\
+	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+	(type *)( (char *)__mptr - offsetof(type,member) );})
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+/*
+ * Default machine parameters.
+ */
+
+#ifndef CACHE_LINE_SIZE
+#define CACHE_LINE_SIZE 128
+#endif /* #ifndef CACHE_LINE_SIZE */
+
+/*
+ * Exclusive locking primitives.
+ */
+
+typedef pthread_mutex_t spinlock_t;
+
+#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
+#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
+
+static void spin_lock_init(spinlock_t *sp)
+{
+	if (pthread_mutex_init(sp, NULL) != 0) {
+		perror("spin_lock_init:pthread_mutex_init");
+		exit(-1);
+	}
+}
+
+static void spin_lock(spinlock_t *sp)
+{
+	if (pthread_mutex_lock(sp) != 0) {
+		perror("spin_lock:pthread_mutex_lock");
+		exit(-1);
+	}
+}
+
+static int spin_trylock(spinlock_t *sp)
+{
+	int retval;
+
+	if ((retval = pthread_mutex_trylock(sp)) == 0)
+		return 1;
+	if (retval == EBUSY)
+		return 0;
+	perror("spin_trylock:pthread_mutex_trylock");
+	exit(-1);
+}
+
+static void spin_unlock(spinlock_t *sp)
+{
+	if (pthread_mutex_unlock(sp) != 0) {
+		perror("spin_unlock:pthread_mutex_unlock");
+		exit(-1);
+	}
+}
+
+#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
+#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
+
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define unlikely(x) x
+#define likely(x) x
+#define prefetch(x) x
+
+/*
+ * Thread creation/destruction primitives.
+ */
+
+typedef pthread_t thread_id_t;
+
+#define NR_THREADS 128
+
+#define __THREAD_ID_MAP_EMPTY 0
+#define __THREAD_ID_MAP_WAITING 1
+thread_id_t __thread_id_map[NR_THREADS];
+spinlock_t __thread_id_map_mutex;
+
+#define for_each_thread(t) \
+	for (t = 0; t < NR_THREADS; t++)
+
+#define for_each_running_thread(t) \
+	for (t = 0; t < NR_THREADS; t++) \
+		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
+		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
+
+pthread_key_t thread_id_key;
+
+static int __smp_thread_id(void)
+{
+	int i;
+	thread_id_t tid = pthread_self();
+
+	for (i = 0; i < NR_THREADS; i++) {
+		if (__thread_id_map[i] == tid) {
+			long v = i + 1;	/* must be non-NULL. */
+
+			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
+				perror("pthread_setspecific");
+				exit(-1);
+			}
+			return i;
+		}
+	}
+	spin_lock(&__thread_id_map_mutex);
+	for (i = 0; i < NR_THREADS; i++) {
+		if (__thread_id_map[i] == tid) {
+			spin_unlock(&__thread_id_map_mutex);
+			return i;
+		}
+	}
+	spin_unlock(&__thread_id_map_mutex);
+	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n", tid, tid);
+	exit(-1);
+}
+
+static int smp_thread_id(void)
+{
+	void *id;
+
+	id = pthread_getspecific(thread_id_key);
+	if (id == NULL)
+		return __smp_thread_id();
+	return (long)(id - 1);
+}
+
+static thread_id_t create_thread(void *(*func)(void *), void *arg)
+{
+	thread_id_t tid;
+	int i;
+
+	spin_lock(&__thread_id_map_mutex);
+	for (i = 0; i < NR_THREADS; i++) {
+		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
+			break;
+	}
+	if (i >= NR_THREADS) {
+		spin_unlock(&__thread_id_map_mutex);
+		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
+		exit(-1);
+	}
+	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
+	spin_unlock(&__thread_id_map_mutex);
+	if (pthread_create(&tid, NULL, func, arg) != 0) {
+		perror("create_thread:pthread_create");
+		exit(-1);
+	}
+	__thread_id_map[i] = tid;
+	return tid;
+}
+
+static void *wait_thread(thread_id_t tid)
+{
+	int i;
+	void *vp;
+
+	for (i = 0; i < NR_THREADS; i++) {
+		if (__thread_id_map[i] == tid)
+			break;
+	}
+	if (i >= NR_THREADS) {
+		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n", tid, tid);
+		exit(-1);
+	}
+	if (pthread_join(tid, &vp) != 0) {
+		perror("wait_thread:pthread_join");
+		exit(-1);
+	}
+	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
+	return vp;
+}
+
+static void wait_all_threads(void)
+{
+	int i;
+	thread_id_t tid;
+
+	for (i = 1; i < NR_THREADS; i++) {
+		tid = __thread_id_map[i];
+		if (tid != __THREAD_ID_MAP_EMPTY &&
+		    tid != __THREAD_ID_MAP_WAITING)
+			(void)wait_thread(tid);
+	}
+}
+
+static void run_on(int cpu)
+{
+	cpu_set_t mask;
+
+	CPU_ZERO(&mask);
+	CPU_SET(cpu, &mask);
+	sched_setaffinity(0, sizeof(mask), &mask);
+}
+
+/*
+ * timekeeping -- very crude -- should use MONOTONIC...
+ */
+
+long long get_microseconds(void)
+{
+	struct timeval tv;
+
+	if (gettimeofday(&tv, NULL) != 0)
+		abort();
+	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
+}
+
+/*
+ * Per-thread variables.
+ */
+
+#define DEFINE_PER_THREAD(type, name) \
+	struct { \
+		__typeof__(type) v \
+			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
+	} __per_thread_##name[NR_THREADS];
+#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
+
+#define per_thread(name, thread) __per_thread_##name[thread].v
+#define __get_thread_var(name) per_thread(name, smp_thread_id())
+
+#define init_per_thread(name, v) \
+	do { \
+		int __i_p_t_i; \
+		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
+			per_thread(name, __i_p_t_i) = v; \
+	} while (0)
+
+/*
+ * CPU traversal primitives.
+ */
+
+#ifndef NR_CPUS
+#define NR_CPUS 16
+#endif /* #ifndef NR_CPUS */
+
+#define for_each_possible_cpu(cpu) \
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+#define for_each_online_cpu(cpu) \
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+
+/*
+ * Per-CPU variables.
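+ *
+ * Editorial usage sketch, mirroring the per-thread variables above
+ * ("mycount" is a hypothetical name):
+ *
+ *	DEFINE_PER_CPU(long, mycount);
+ *
+ *	init_per_cpu(mycount, 0);	zero every CPU's copy
+ *	per_cpu(mycount, 3) = 42;	CPU 3's copy, from anywhere
+ *	__get_cpu_var(mycount)++;	the current CPU's copy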
+ */
+
+#define DEFINE_PER_CPU(type, name) \
+	struct { \
+		__typeof__(type) v \
+			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
+	} __per_cpu_##name[NR_CPUS]
+#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
+
+DEFINE_PER_THREAD(int, smp_processor_id);
+
+static int smp_processor_id(void)
+{
+	return __get_thread_var(smp_processor_id);
+}
+
+static void set_smp_processor_id(int cpu)
+{
+	__get_thread_var(smp_processor_id) = cpu;
+}
+
+#define per_cpu(name, thread) __per_cpu_##name[thread].v
+#define __get_cpu_var(name) per_cpu(name, smp_processor_id())
+
+#define init_per_cpu(name, v) \
+	do { \
+		int __i_p_c_i; \
+		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
+			per_cpu(name, __i_p_c_i) = v; \
+	} while (0)
+
+/*
+ * CPU state checking (crowbarred).
+ */
+
+#define idle_cpu(cpu) 0
+#define in_softirq() 1
+#define hardirq_count() 0
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+
+/*
+ * CPU hotplug.
+ */
+
+struct notifier_block {
+	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
+	struct notifier_block *next;
+	int priority;
+};
+
+#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
+#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
+#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
+#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
+#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
+#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
+#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
+					* not handling interrupts, soon dead */
+#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
+					* lock is dropped */
+
+/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
+ * operation in progress
+ */
+#define CPU_TASKS_FROZEN	0x0010
+
+#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
+#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
+#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
+#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
+#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
+
+/* Hibernation and suspend events */
+#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
+#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
+#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
+#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
+#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
+#define PM_POST_RESTORE		0x0006 /* Restore failed */
+
+#define NOTIFY_DONE		0x0000 /* Don't care */
+#define NOTIFY_OK		0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK	0x8000 /* Don't call further */
+#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
+					/* Bad/Veto action */
+/*
+ * Clean way to return from the notifier and stop further calls.
+ */
+#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)
+
+/*
+ * Bug checks.
+ */
+
+#define BUG_ON(c) do { if (c) abort(); } while (0)
+
+/*
+ * Initialization -- Must be called before calling any primitives.
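+ *
+ * Editorial note: smp_init() below claims slot 0 of __thread_id_map for
+ * the calling thread and marks the remaining slots empty, so it must
+ * run before the first create_thread() or smp_thread_id() call.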
+ */ + +static void smp_init(void) +{ + int i; + + spin_lock_init(&__thread_id_map_mutex); + __thread_id_map[0] = pthread_self(); + for (i = 1; i < NR_THREADS; i++) + __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; + init_per_thread(smp_processor_id, 0); + if (pthread_key_create(&thread_id_key, NULL) != 0) { + perror("pthread_key_create"); + exit(-1); + } +} + +/* Taken from the Linux kernel source tree, so GPLv2-only!!! */ + +#ifndef _LINUX_LIST_H +#define _LINUX_LIST_H + +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) + +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list; + list->prev = list; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +#ifndef CONFIG_DEBUG_LIST +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} +#else +extern void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next); +#endif + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head * prev, struct list_head * next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. 
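+ *
+ * Editorial note: list_del() below sets the deleted entry's next/prev
+ * to the LIST_POISON values defined at the top of this list section, so
+ * code that erroneously traverses through a freshly deleted entry
+ * faults on a recognizable address instead of silently corrupting the
+ * list.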
+ */ +#ifndef CONFIG_DEBUG_LIST +static inline void list_del(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} +#else +extern void list_del(struct list_head *entry); +#endif + +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add_tail(list, head); +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. + */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. 
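+ *
+ * Editorial note: with exactly one entry, head->next and head->prev
+ * both point at that entry, so head->next == head->prev; the
+ * list_empty() check below excludes the empty list, in which both
+ * pointers point back at @head itself.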
+ */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + +static inline void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * list_splice - join two lists, this is designed for stacks + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(const struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. 
+ * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; prefetch(pos->next), pos != (head); \ + pos = pos->next) + +/** + * __list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This variant differs from list_for_each() in that it's the + * simplest possible list iteration code, no prefetching is done. + * Use this for code that knows the list to be very short (empty + * or 1 entry) most of the time. + */ +#define __list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ + pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_prev_safe(pos, n, head) \ + for (pos = (head)->prev, n = pos->prev; \ + prefetch(pos->prev), pos != (head); \ + pos = n, n = pos->prev) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ + prefetch(pos->member.prev), &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_struct within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. 
+ * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_continue_reverse - iterate backwards from the given point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Start to iterate over list of given type backwards, continuing after + * the current position. + */ +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ + prefetch(pos->member.prev), &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_continue + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_from + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_reverse + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. 
+ * + * Iterate backwards over list of given type, safe against removal + * of list entry. + */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member), \ + n = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.prev, typeof(*n), member)) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). + */ + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) +{ + next->next = n->next; + n->next = next; + next->pprev = &n->next; + + if(next->next) + next->next->pprev = &next->next; +} + +/* + * Move a list from one list head to another. Fixup the pprev + * reference of the first entry if it exists. + */ +static inline void hlist_move_list(struct hlist_head *old, + struct hlist_head *new) +{ + new->first = old->first; + if (new->first) + new->first->pprev = &new->first; + old->first = NULL; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ + pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +/** + * hlist_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. 
+ * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(tpos, pos, member)		 \
+	for (pos = (pos)->next;						 \
+	     pos && ({ prefetch(pos->next); 1;}) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from current point
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(tpos, pos, member)			 \
+	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_node to use as a loop cursor.
+ * @n:		another &struct hlist_node to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		 \
+	for (pos = (head)->first;					 \
+	     pos && ({ n = pos->next; 1; }) &&				 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = n)
+
+#endif
diff --git a/rcutorture.h b/rcutorture.h
new file mode 100644
index 0000000..7a19fed
--- /dev/null
+++ b/rcutorture.h
@@ -0,0 +1,412 @@
+/*
+ * rcutorture.h: simple user-level performance/stress test of RCU.
+ *
+ * Usage:
+ *	./rcu <nreaders> rperf [ <cpustride> ]
+ *		Run a read-side performance test with the specified
+ *		number of readers spaced by <cpustride>.
+ *		Thus "./rcu 16 rperf 2" would run 16 readers on even-numbered
+ *		CPUs from 0 to 30.
+ *	./rcu <nupdaters> uperf [ <cpustride> ]
+ *		Run an update-side performance test with the specified
+ *		number of updaters and specified CPU spacing.
+ *	./rcu <nreaders> perf [ <cpustride> ]
+ *		Run a combined read/update performance test with the specified
+ *		number of readers and one updater and specified CPU spacing.
+ *		The readers run on the low-numbered CPUs and the updater
+ *		on the highest-numbered CPU.
+ *
+ * The above tests produce output as follows:
+ *
+ * n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
+ * ns/read: 43.4707 ns/update: 6848.1
+ *
+ * The first line lists the total number of RCU reads and updates executed
+ * during the test, the number of reader threads, the number of updater
+ * threads, and the duration of the test in seconds. The second line
+ * lists the average duration of each type of operation in nanoseconds,
+ * or "nan" if the corresponding type of operation was not performed.
+ *
+ *	./rcu <nreaders> stress
+ *		Run a stress test with the specified number of readers and
+ *		one updater. None of the threads are affinitied to any
+ *		particular CPU.
+ *
+ * This test produces output as follows:
+ *
+ * n_reads: 114633217 n_updates: 3903415 n_mberror: 0
+ * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
+ *
+ * The first line lists the number of RCU read and update operations
+ * executed, followed by the number of memory-ordering violations
+ * (which will be zero in a correct RCU implementation). The second
+ * line lists the number of readers observing progressively more stale
+ * data. A correct RCU implementation will have all but the first two
+ * numbers being zero.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (c) 2008 Paul E. McKenney, IBM Corporation. + */ + +/* + * Test variables. + */ + +DEFINE_PER_THREAD(long long, n_reads_pt); +DEFINE_PER_THREAD(long long, n_updates_pt); + +long long n_reads = 0LL; +long n_updates = 0L; +atomic_t nthreadsrunning; +char argsbuf[64]; + +#define GOFLAG_INIT 0 +#define GOFLAG_RUN 1 +#define GOFLAG_STOP 2 + +int goflag __attribute__((__aligned__(CACHE_LINE_SIZE))) = GOFLAG_INIT; + +#define RCU_READ_RUN 1000 + +//MD +#define RCU_READ_NESTABLE + +#ifdef RCU_READ_NESTABLE +#define rcu_read_lock_nest() rcu_read_lock() +#define rcu_read_unlock_nest() rcu_read_unlock() +#else /* #ifdef RCU_READ_NESTABLE */ +#define rcu_read_lock_nest() +#define rcu_read_unlock_nest() +#endif /* #else #ifdef RCU_READ_NESTABLE */ + +#ifndef mark_rcu_quiescent_state +#define mark_rcu_quiescent_state() do ; while (0) +#endif /* #ifdef mark_rcu_quiescent_state */ + +#ifndef put_thread_offline +#define put_thread_offline() do ; while (0) +#define put_thread_online() do ; while (0) +#define put_thread_online_delay() do ; while (0) +#else /* #ifndef put_thread_offline */ +#define put_thread_online_delay() synchronize_rcu() +#endif /* #else #ifndef put_thread_offline */ + +/* + * Performance test. 
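+ *
+ * Editorial note on the reported figures: perftestrun() below computes
+ * ns/read as (duration * 1e9 * nreaders) / n_reads.  For the sample
+ * output quoted in the header comment (2 readers, 1 second, 46008000
+ * reads), that is (1 * 1e9 * 2) / 46008000 = 43.4707 ns/read.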
+ */
+
+void *rcu_read_perf_test(void *arg)
+{
+	int i;
+	int me = (long)arg;
+	cpu_set_t mask;
+	long long n_reads_local = 0;
+
+	urcu_register_thread();
+	run_on(me);
+	__atomic_inc(&nthreadsrunning);
+	while (goflag == GOFLAG_INIT)
+		poll(NULL, 0, 1);
+	mark_rcu_quiescent_state();
+	while (goflag == GOFLAG_RUN) {
+		for (i = 0; i < RCU_READ_RUN; i++) {
+			rcu_read_lock();
+			/* rcu_read_lock_nest(); */
+			/* rcu_read_unlock_nest(); */
+			rcu_read_unlock();
+		}
+		n_reads_local += RCU_READ_RUN;
+		mark_rcu_quiescent_state();
+	}
+	__get_thread_var(n_reads_pt) += n_reads_local;
+	put_thread_offline();
+	urcu_unregister_thread();
+
+	return (NULL);
+}
+
+void *rcu_update_perf_test(void *arg)
+{
+	long long n_updates_local = 0;
+
+	__atomic_inc(&nthreadsrunning);
+	while (goflag == GOFLAG_INIT)
+		poll(NULL, 0, 1);
+	while (goflag == GOFLAG_RUN) {
+		synchronize_rcu();
+		n_updates_local++;
+	}
+	__get_thread_var(n_updates_pt) += n_updates_local;
+	return (NULL);
+}
+
+void perftestinit(void)
+{
+	init_per_thread(n_reads_pt, 0LL);
+	init_per_thread(n_updates_pt, 0LL);
+	atomic_set(&nthreadsrunning, 0);
+}
+
+void perftestrun(int nthreads, int nreaders, int nupdaters)
+{
+	int t;
+	int duration = 1;
+
+	smp_mb();
+	while (atomic_read(&nthreadsrunning) < nthreads)
+		poll(NULL, 0, 1);
+	goflag = GOFLAG_RUN;
+	smp_mb();
+	sleep(duration);
+	smp_mb();
+	goflag = GOFLAG_STOP;
+	smp_mb();
+	wait_all_threads();
+	for_each_thread(t) {
+		n_reads += per_thread(n_reads_pt, t);
+		n_updates += per_thread(n_updates_pt, t);
+	}
+	printf("n_reads: %lld n_updates: %ld nreaders: %d nupdaters: %d duration: %d\n",
+	       n_reads, n_updates, nreaders, nupdaters, duration);
+	printf("ns/read: %g ns/update: %g\n",
+	       ((duration * 1000*1000*1000.*(double)nreaders) /
+	        (double)n_reads),
+	       ((duration * 1000*1000*1000.*(double)nupdaters) /
+	        (double)n_updates));
+	exit(0);
+}
+
+void perftest(int nreaders, int cpustride)
+{
+	int i;
+	long arg;
+
+	perftestinit();
+	for (i = 0; i < nreaders; i++) {
+		arg = (long)(i * cpustride);
+		create_thread(rcu_read_perf_test, (void *)arg);
+	}
+	arg = (long)(i * cpustride);
+	create_thread(rcu_update_perf_test, (void *)arg);
+	perftestrun(i + 1, nreaders, 1);
+}
+
+void rperftest(int nreaders, int cpustride)
+{
+	int i;
+	long arg;
+
+	perftestinit();
+	init_per_thread(n_reads_pt, 0LL);
+	for (i = 0; i < nreaders; i++) {
+		arg = (long)(i * cpustride);
+		create_thread(rcu_read_perf_test, (void *)arg);
+	}
+	perftestrun(i, nreaders, 0);
+}
+
+void uperftest(int nupdaters, int cpustride)
+{
+	int i;
+	long arg;
+
+	perftestinit();
+	init_per_thread(n_reads_pt, 0LL);
+	for (i = 0; i < nupdaters; i++) {
+		arg = (long)(i * cpustride);
+		create_thread(rcu_update_perf_test, (void *)arg);
+	}
+	perftestrun(i, 0, nupdaters);
+}
+
+/*
+ * Stress test.
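+ *
+ * Editorial note: rcu_update_stress_test() below publishes a fresh
+ * element of rcu_stress_array[] via rcu_assign_pointer(), increments
+ * pipe_count on every element it is no longer publishing, and only then
+ * calls synchronize_rcu().  A reader that sees pipe_count == k thus
+ * dereferenced a pointer roughly k grace periods stale, so a correct
+ * implementation confines the rcu_stress_count[] histogram to entries
+ * 0 and 1.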
+ */
+
+#define RCU_STRESS_PIPE_LEN 10
+
+struct rcu_stress {
+	int pipe_count;
+	int mbtest;
+};
+
+struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { 0 };
+struct rcu_stress *rcu_stress_current;
+int rcu_stress_idx = 0;
+
+int n_mberror = 0;
+DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
+
+int garbage = 0;
+
+void *rcu_read_stress_test(void *arg)
+{
+	int i;
+	int itercnt = 0;
+	struct rcu_stress *p;
+	int pc;
+
+	urcu_register_thread();
+	while (goflag == GOFLAG_INIT)
+		poll(NULL, 0, 1);
+	mark_rcu_quiescent_state();
+	while (goflag == GOFLAG_RUN) {
+		rcu_read_lock();
+		p = rcu_dereference(rcu_stress_current);
+		if (p->mbtest == 0)
+			n_mberror++;
+		rcu_read_lock_nest();
+		for (i = 0; i < 100; i++)
+			garbage++;
+		rcu_read_unlock_nest();
+		pc = p->pipe_count;
+		rcu_read_unlock();
+		if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
+			pc = RCU_STRESS_PIPE_LEN;
+		__get_thread_var(rcu_stress_count)[pc]++;
+		__get_thread_var(n_reads_pt)++;
+		mark_rcu_quiescent_state();
+		if ((++itercnt % 0x1000) == 0) {
+			put_thread_offline();
+			put_thread_online_delay();
+			put_thread_online();
+		}
+	}
+	put_thread_offline();
+	urcu_unregister_thread();
+
+	return (NULL);
+}
+
+void *rcu_update_stress_test(void *arg)
+{
+	int i;
+	struct rcu_stress *p;
+
+	while (goflag == GOFLAG_INIT)
+		poll(NULL, 0, 1);
+	while (goflag == GOFLAG_RUN) {
+		i = rcu_stress_idx + 1;
+		if (i >= RCU_STRESS_PIPE_LEN)
+			i = 0;
+		p = &rcu_stress_array[i];
+		p->mbtest = 0;
+		smp_mb();
+		p->pipe_count = 0;
+		p->mbtest = 1;
+		rcu_assign_pointer(rcu_stress_current, p);
+		rcu_stress_idx = i;
+		for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
+			if (i != rcu_stress_idx)
+				rcu_stress_array[i].pipe_count++;
+		synchronize_rcu();
+		n_updates++;
+	}
+	return (NULL);
+}
+
+void *rcu_fake_update_stress_test(void *arg)
+{
+	while (goflag == GOFLAG_INIT)
+		poll(NULL, 0, 1);
+	while (goflag == GOFLAG_RUN) {
+		synchronize_rcu();
+		poll(NULL, 0, 1);
+	}
+	return (NULL);
+}
+
+void stresstest(int nreaders)
+{
+	int i;
+	int t;
+	long long *p;
+	long long sum;
+
+	init_per_thread(n_reads_pt, 0LL);
+	for_each_thread(t) {
+		p = &per_thread(rcu_stress_count,t)[0];
+		for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++)
+			p[i] = 0LL;
+	}
+	rcu_stress_current = &rcu_stress_array[0];
+	rcu_stress_current->pipe_count = 0;
+	rcu_stress_current->mbtest = 1;
+	for (i = 0; i < nreaders; i++)
+		create_thread(rcu_read_stress_test, NULL);
+	create_thread(rcu_update_stress_test, NULL);
+	for (i = 0; i < 5; i++)
+		create_thread(rcu_fake_update_stress_test, NULL);
+	smp_mb();
+	goflag = GOFLAG_RUN;
+	smp_mb();
+	sleep(10);
+	smp_mb();
+	goflag = GOFLAG_STOP;
+	smp_mb();
+	wait_all_threads();
+	for_each_thread(t)
+		n_reads += per_thread(n_reads_pt, t);
+	printf("n_reads: %lld n_updates: %ld n_mberror: %d\n",
+	       n_reads, n_updates, n_mberror);
+	printf("rcu_stress_count:");
+	for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
+		sum = 0LL;
+		for_each_thread(t) {
+			sum += per_thread(rcu_stress_count, t)[i];
+		}
+		printf(" %lld", sum);
+	}
+	printf("\n");
+	exit(0);
+}
+
+/*
+ * Mainprogram.
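+ *
+ * Editorial example invocations (the binary name depends on how the
+ * including .c file is built; see urcutorture.c below):
+ *
+ *	./urcutorture 4 rperf 2		4 readers on CPUs 0, 2, 4, 6
+ *	./urcutorture 2 uperf		2 updaters, read side idle
+ *	./urcutorture 8 stress		8 readers, 1 updater, 5 fake updaters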
+ */
+
+void usage(int argc, char *argv[])
+{
+	fprintf(stderr, "Usage: %s [nreaders [ perf | stress ] ]\n", argv[0]);
+	exit(-1);
+}
+
+int main(int argc, char *argv[])
+{
+	int nreaders = 1;
+	int cpustride = 1;
+
+	smp_init();
+	//rcu_init();
+
+	if (argc > 1) {
+		nreaders = strtoul(argv[1], NULL, 0);
+		if (argc == 2)
+			perftest(nreaders, cpustride);
+		if (argc > 3)
+			cpustride = strtoul(argv[3], NULL, 0);
+		if (strcmp(argv[2], "perf") == 0)
+			perftest(nreaders, cpustride);
+		else if (strcmp(argv[2], "rperf") == 0)
+			rperftest(nreaders, cpustride);
+		else if (strcmp(argv[2], "uperf") == 0)
+			uperftest(nreaders, cpustride);
+		else if (strcmp(argv[2], "stress") == 0)
+			stresstest(nreaders);
+		usage(argc, argv);
+	}
+	perftest(nreaders, cpustride);
+	return 0;
+}
diff --git a/urcutorture.c b/urcutorture.c
new file mode 100644
index 0000000..7d672fd
--- /dev/null
+++ b/urcutorture.c
@@ -0,0 +1,8 @@
+#include <string.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <poll.h>
+#include "urcu.h"
+#include "api.h"
+#include "rcutorture.h"
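
Editorial build note (assumed, not part of the commit): urcutorture.c pulls in
api.h and rcutorture.h directly, so it only needs to be compiled together with
the library's urcu.c and linked against pthreads, along the lines of:

    gcc -O2 -g -Wall -o urcutorture urcutorture.c urcu.c -lpthread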