#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <linux/spinlock.h>
#include <asm/ptrace.h>
#if (NR_CPUS > 64 && (BITS_PER_LONG == 32 || NR_CPUS > 32768))
#error "fair rwlock needs more bits per long to deal with that many CPUs"
#endif
+/* Duration of the no-contention tests, in seconds */
+#define SINGLE_WRITER_TEST_DURATION 10
+#define SINGLE_READER_TEST_DURATION 10
+#define MULTIPLE_READERS_TEST_DURATION 10
+
/* Test duration, in seconds */
#define TEST_DURATION 60
-#define THREAD_ROFFSET 1UL
-#define THREAD_RMASK ((NR_CPUS - 1) * THREAD_ROFFSET)
-#define SOFTIRQ_ROFFSET (THREAD_RMASK + 1)
-#define SOFTIRQ_RMASK ((NR_CPUS - 1) * SOFTIRQ_ROFFSET)
-#define HARDIRQ_ROFFSET ((SOFTIRQ_RMASK | THREAD_RMASK) + 1)
-#define HARDIRQ_RMASK ((NR_CPUS - 1) * HARDIRQ_ROFFSET)
-
-#define SUBSCRIBERS_WOFFSET \
- ((HARDIRQ_RMASK | SOFTIRQ_RMASK | THREAD_RMASK) + 1)
-#define SUBSCRIBERS_WMASK \
- ((NR_CPUS - 1) * SUBSCRIBERS_WOFFSET)
-#define WRITER_MUTEX \
- ((SUBSCRIBERS_WMASK | HARDIRQ_RMASK | SOFTIRQ_RMASK | THREAD_RMASK) + 1)
-#define SOFTIRQ_WMASK (WRITER_MUTEX << 1)
-#define SOFTIRQ_WOFFSET SOFTIRQ_WMASK
-#define HARDIRQ_WMASK (SOFTIRQ_WMASK << 1)
-#define HARDIRQ_WOFFSET HARDIRQ_WMASK
-
#define NR_VARS 100
-#define NR_WRITERS 3
-#define NR_READERS 6
-#define NR_INTERRUPT_READERS 2
+//#define NR_WRITERS 2
+#define NR_WRITERS 2
+//#define NR_TRYLOCK_WRITERS 2
+#define NR_TRYLOCK_WRITERS 0
+#define NR_READERS 4
+//#define NR_TRYLOCK_READERS 2
+#define NR_TRYLOCK_READERS 0
+
+/*
+ * 1 : test standard rwlock
+ * 0 : test frwlock
+ */
+#define TEST_STD_RWLOCK 0
-/* Writer iteration delay, in ms. 0 for busy loop. */
-#define WRITER_DELAY 0
+/*
+ * 1 : test with thread and interrupt readers.
+ * 0 : test only with thread readers.
+ */
+#define TEST_INTERRUPTS 1
+
+#if (TEST_INTERRUPTS)
+#define NR_INTERRUPT_READERS 1
+#define NR_TRYLOCK_INTERRUPT_READERS 1
+#else
+#define NR_INTERRUPT_READERS 0
+#define NR_TRYLOCK_INTERRUPT_READERS 0
+#endif
+
+/*
+ * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
+ * starve readers.
+ */
+#define WRITER_DELAY 100
+#define TRYLOCK_WRITER_DELAY 1000
+
+/*
+ * Number of iterations after which a trylock writer fails.
+ * -1 for infinite loop.
+ */
+#define TRYLOCK_WRITERS_FAIL_ITER 100
+
+/* Thread and interrupt reader delay, in ms */
+#define THREAD_READER_DELAY 0 /* busy loop */
+#define INTERRUPT_READER_DELAY 100
static int var[NR_VARS];
static struct task_struct *reader_threads[NR_READERS];
+static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
-static struct task_struct *interrupt_reader;
+static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
+static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
+static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];
+
+#if (TEST_STD_RWLOCK)
+
+static DEFINE_RWLOCK(std_rw_lock);
+
+#define wrap_read_lock() read_lock(&std_rw_lock)
+#define wrap_read_trylock() read_trylock(&std_rw_lock)
+#define wrap_read_unlock() read_unlock(&std_rw_lock)
+
+#define wrap_read_lock_irq() read_lock(&std_rw_lock)
+#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
+#define wrap_read_unlock_irq() read_unlock(&std_rw_lock)
+
+#if (TEST_INTERRUPTS)
+#define wrap_write_lock() write_lock_irq(&std_rw_lock)
+#define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
+#else
+#define wrap_write_lock() write_lock(&std_rw_lock)
+#define wrap_write_unlock() write_unlock(&std_rw_lock)
+#endif
+
+#else
static struct fair_rwlock frwlock = {
.value = ATOMIC_LONG_INIT(0),
};
+#define wrap_read_lock() fair_read_lock(&frwlock)
+#define wrap_read_trylock() fair_read_trylock(&frwlock)
+#define wrap_read_unlock() fair_read_unlock(&frwlock)
+
+#define wrap_read_lock_irq() fair_read_lock_irq(&frwlock)
+#define wrap_read_trylock_irq() fair_read_trylock_irq(&frwlock)
+#define wrap_read_unlock_irq() fair_read_unlock_irq(&frwlock)
+
+#if (TEST_INTERRUPTS)
+#define wrap_write_lock() fair_write_lock_irq(&frwlock)
+#define wrap_write_unlock() fair_write_unlock_irq(&frwlock)
+#else
+#define wrap_write_lock() fair_write_lock(&frwlock)
+#define wrap_write_unlock() fair_write_unlock(&frwlock)
+#endif
+
+#endif
+
+static cycles_t cycles_calibration_min,
+ cycles_calibration_avg,
+ cycles_calibration_max;
+
+static inline cycles_t calibrate_cycles(cycles_t cycles)
+{
+ return cycles - cycles_calibration_avg;
+}
+
struct proc_dir_entry *pentry = NULL;
static int reader_thread(void *data)
int i;
int prev, cur;
unsigned long iter = 0;
- cycles_t time1, time2, delaymax = 0;
+ cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
+ delayavg = 0;
printk("reader_thread/%lu runnning\n", (unsigned long)data);
do {
iter++;
preempt_disable(); /* for get_cycles accuracy */
+ rdtsc_barrier();
time1 = get_cycles();
- fair_read_lock(&frwlock);
+ rdtsc_barrier();
+
+ wrap_read_lock();
+
+ rdtsc_barrier();
time2 = get_cycles();
- delaymax = max(delaymax, time2 - time1);
+ rdtsc_barrier();
+ delay = time2 - time1;
+ delaymax = max(delaymax, delay);
+ delaymin = min(delaymin, delay);
+ delayavg += delay;
prev = var[0];
for (i = 1; i < NR_VARS; i++) {
cur = var[i];
"Unequal cur %d/prev %d at i %d, iter %lu "
"in thread\n", cur, prev, i, iter);
}
- fair_read_unlock(&frwlock);
+
+ wrap_read_unlock();
+
preempt_enable(); /* for get_cycles accuracy */
- //msleep(100);
+ if (THREAD_READER_DELAY)
+ msleep(THREAD_READER_DELAY);
+ } while (!kthread_should_stop());
+ if (!iter) {
+		printk("reader_thread/%lu iterations : %lu\n",
+			(unsigned long)data, iter);
+ } else {
+ delayavg /= iter;
+ printk("reader_thread/%lu iterations : %lu, "
+ "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
+ (unsigned long)data, iter,
+ calibrate_cycles(delaymin),
+ calibrate_cycles(delayavg),
+ calibrate_cycles(delaymax));
+ }
+ return 0;
+}
+
+static int trylock_reader_thread(void *data)
+{
+ int i;
+ int prev, cur;
+ unsigned long iter = 0, success_iter = 0;
+
+	printk("trylock_reader_thread/%lu running\n", (unsigned long)data);
+ do {
+ while (!wrap_read_trylock())
+ iter++;
+ success_iter++;
+ prev = var[0];
+ for (i = 1; i < NR_VARS; i++) {
+ cur = var[i];
+ if (cur != prev)
+ printk(KERN_ALERT
+ "Unequal cur %d/prev %d at i %d, iter %lu "
+ "in thread\n", cur, prev, i, iter);
+ }
+ wrap_read_unlock();
+ if (THREAD_READER_DELAY)
+ msleep(THREAD_READER_DELAY);
} while (!kthread_should_stop());
- printk("reader_thread/%lu iterations : %lu, "
- "max contention %llu cycles\n",
- (unsigned long)data, iter, delaymax);
+ printk("trylock_reader_thread/%lu iterations : %lu, "
+ "successful iterations : %lu\n",
+ (unsigned long)data, iter, success_iter);
return 0;
}
+DEFINE_PER_CPU(cycles_t, int_delaymin);
+DEFINE_PER_CPU(cycles_t, int_delayavg);
DEFINE_PER_CPU(cycles_t, int_delaymax);
+DEFINE_PER_CPU(cycles_t, int_ipi_nr);
static void interrupt_reader_ipi(void *data)
{
int i;
int prev, cur;
cycles_t time1, time2;
- cycles_t *delaymax;
+ cycles_t *delaymax, *delaymin, *delayavg, *ipi_nr, delay;
/*
* Skip the ipi caller, not in irq context.
return;
delaymax = &per_cpu(int_delaymax, smp_processor_id());
+ delaymin = &per_cpu(int_delaymin, smp_processor_id());
+ delayavg = &per_cpu(int_delayavg, smp_processor_id());
+ ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());
+
+ rdtsc_barrier();
time1 = get_cycles();
- fair_read_lock(&frwlock);
+ rdtsc_barrier();
+
+ wrap_read_lock_irq();
+
+ rdtsc_barrier();
time2 = get_cycles();
- *delaymax = max(*delaymax, time2 - time1);
+ rdtsc_barrier();
+ delay = time2 - time1;
+ *delaymax = max(*delaymax, delay);
+ *delaymin = min(*delaymin, delay);
+ *delayavg += delay;
+ (*ipi_nr)++;
prev = var[0];
for (i = 1; i < NR_VARS; i++) {
cur = var[i];
"Unequal cur %d/prev %d at i %d in interrupt\n",
cur, prev, i);
}
- fair_read_unlock(&frwlock);
+ wrap_read_unlock_irq();
}
+DEFINE_PER_CPU(unsigned long, trylock_int_iter);
+DEFINE_PER_CPU(unsigned long, trylock_int_success);
+
+static void trylock_interrupt_reader_ipi(void *data)
+{
+ int i;
+ int prev, cur;
+
+ /*
+ * Skip the ipi caller, not in irq context.
+ */
+ if (!in_irq())
+ return;
+
+ per_cpu(trylock_int_iter, smp_processor_id())++;
+ while (!wrap_read_trylock_irq())
+ per_cpu(trylock_int_iter, smp_processor_id())++;
+ per_cpu(trylock_int_success, smp_processor_id())++;
+ prev = var[0];
+ for (i = 1; i < NR_VARS; i++) {
+ cur = var[i];
+ if (cur != prev)
+ printk(KERN_ALERT
+ "Unequal cur %d/prev %d at i %d in interrupt\n",
+ cur, prev, i);
+ }
+ wrap_read_unlock_irq();
+}
+
+
static int interrupt_reader_thread(void *data)
{
unsigned long iter = 0;
int i;
+ for_each_online_cpu(i) {
+ per_cpu(int_delaymax, i) = 0;
+ per_cpu(int_delaymin, i) = ULLONG_MAX;
+ per_cpu(int_delayavg, i) = 0;
+ per_cpu(int_ipi_nr, i) = 0;
+ }
do {
iter++;
on_each_cpu(interrupt_reader_ipi, NULL, 0);
- msleep(100);
+ if (INTERRUPT_READER_DELAY)
+ msleep(INTERRUPT_READER_DELAY);
} while (!kthread_should_stop());
printk("interrupt_reader_thread/%lu iterations : %lu\n",
(unsigned long)data, iter);
for_each_online_cpu(i) {
+ if (!per_cpu(int_ipi_nr, i))
+ continue;
+ per_cpu(int_delayavg, i) /= per_cpu(int_ipi_nr, i);
printk("interrupt readers on CPU %i, "
- "max contention : %llu cycles\n",
- i, per_cpu(int_delaymax, i));
+ "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
+ i,
+ calibrate_cycles(per_cpu(int_delaymin, i)),
+ calibrate_cycles(per_cpu(int_delayavg, i)),
+ calibrate_cycles(per_cpu(int_delaymax, i)));
+ }
+ return 0;
+}
+
+static int trylock_interrupt_reader_thread(void *data)
+{
+ unsigned long iter = 0;
+ int i;
+
+ do {
+ iter++;
+ on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
+ if (INTERRUPT_READER_DELAY)
+ msleep(INTERRUPT_READER_DELAY);
+ } while (!kthread_should_stop());
+ printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
+ (unsigned long)data, iter);
+ for_each_online_cpu(i) {
+ printk("trylock interrupt readers on CPU %i, "
+ "iterations %lu, "
+ "successful iterations : %lu\n",
+ i, per_cpu(trylock_int_iter, i),
+ per_cpu(trylock_int_success, i));
+ per_cpu(trylock_int_iter, i) = 0;
+ per_cpu(trylock_int_success, i) = 0;
}
return 0;
}
int i;
int new;
unsigned long iter = 0;
- cycles_t time1, time2, delaymax = 0;
+ cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
+ delayavg = 0;
printk("writer_thread/%lu runnning\n", (unsigned long)data);
do {
iter++;
preempt_disable(); /* for get_cycles accuracy */
+ rdtsc_barrier();
time1 = get_cycles();
- fair_write_lock_irq(&frwlock);
- //fair_write_lock(&frwlock);
+ rdtsc_barrier();
+
+ wrap_write_lock();
+
+ rdtsc_barrier();
time2 = get_cycles();
- delaymax = max(delaymax, time2 - time1);
+ rdtsc_barrier();
+ delay = time2 - time1;
+ delaymax = max(delaymax, delay);
+ delaymin = min(delaymin, delay);
+ delayavg += delay;
new = (int)get_cycles();
for (i = 0; i < NR_VARS; i++) {
var[i] = new;
}
- //fair_write_unlock(&frwlock);
- fair_write_unlock_irq(&frwlock);
+
+ wrap_write_unlock();
+
preempt_enable(); /* for get_cycles accuracy */
if (WRITER_DELAY > 0)
- msleep(WRITER_DELAY);
+ udelay(WRITER_DELAY);
} while (!kthread_should_stop());
+ delayavg /= iter;
printk("writer_thread/%lu iterations : %lu, "
- "max contention %llu cycles\n",
- (unsigned long)data, iter, delaymax);
+ "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
+ (unsigned long)data, iter,
+ calibrate_cycles(delaymin),
+ calibrate_cycles(delayavg),
+ calibrate_cycles(delaymax));
+ return 0;
+}
+
+#if (TEST_STD_RWLOCK)
+static int trylock_writer_thread(void *data)
+{
+ int i;
+ int new;
+ unsigned long iter = 0, success = 0, fail = 0;
+
+	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
+ do {
+#if (TEST_INTERRUPTS)
+ /* std write trylock cannot disable interrupts. */
+ local_irq_disable();
+#endif
+
+#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
+ for (;;) {
+ iter++;
+ if (write_trylock(&std_rw_lock))
+ goto locked;
+ }
+#else
+ for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
+ iter++;
+ if (write_trylock(&std_rw_lock))
+ goto locked;
+ }
+#endif
+ fail++;
+#if (TEST_INTERRUPTS)
+ local_irq_enable();
+#endif
+ goto loop;
+locked:
+ success++;
+ new = (int)get_cycles();
+ for (i = 0; i < NR_VARS; i++) {
+ var[i] = new;
+ }
+#if (TEST_INTERRUPTS)
+ write_unlock_irq(&std_rw_lock);
+#else
+ write_unlock(&std_rw_lock);
+#endif
+loop:
+ if (TRYLOCK_WRITER_DELAY > 0)
+ udelay(TRYLOCK_WRITER_DELAY);
+ } while (!kthread_should_stop());
+ printk("trylock_writer_thread/%lu iterations : "
+ "[try,success,fail after %d try], "
+ "%lu,%lu,%lu\n",
+ (unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
+ iter, success, fail);
+ return 0;
+}
+
+#else /* !TEST_STD_RWLOCK */
+
+static int trylock_writer_thread(void *data)
+{
+ int i;
+ int new;
+ unsigned long iter = 0, success = 0, fail = 0;
+
+	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
+ do {
+ iter++;
+#if (TEST_INTERRUPTS)
+ if (fair_write_trylock_irq_else_subscribe(&frwlock))
+#else
+ if (fair_write_trylock_else_subscribe(&frwlock))
+#endif
+ goto locked;
+
+#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
+ for (;;) {
+ iter++;
+#if (TEST_INTERRUPTS)
+ if (fair_write_trylock_irq_subscribed(&frwlock))
+#else
+ if (fair_write_trylock_subscribed(&frwlock))
+#endif
+ goto locked;
+ }
+#else
+ for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
+ iter++;
+#if (TEST_INTERRUPTS)
+ if (fair_write_trylock_irq_subscribed(&frwlock))
+#else
+ if (fair_write_trylock_subscribed(&frwlock))
+#endif
+ goto locked;
+ }
+#endif
+ fail++;
+ fair_write_unsubscribe(&frwlock);
+ goto loop;
+locked:
+ success++;
+ new = (int)get_cycles();
+ for (i = 0; i < NR_VARS; i++) {
+ var[i] = new;
+ }
+#if (TEST_INTERRUPTS)
+ fair_write_unlock_irq(&frwlock);
+#else
+ fair_write_unlock(&frwlock);
+#endif
+loop:
+ if (TRYLOCK_WRITER_DELAY > 0)
+ udelay(TRYLOCK_WRITER_DELAY);
+ } while (!kthread_should_stop());
+ printk("trylock_writer_thread/%lu iterations : "
+ "[try,success,fail after %d try], "
+ "%lu,%lu,%lu\n",
+ (unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
+ iter, success, fail);
return 0;
}
+#endif /* TEST_STD_RWLOCK */
+
static void fair_rwlock_create(void)
{
unsigned long i;
BUG_ON(!reader_threads[i]);
}
- printk("starting interrupt reader %lu\n", i);
- interrupt_reader = kthread_run(interrupt_reader_thread, NULL,
- "frwlock_interrupt_reader");
-
+ for (i = 0; i < NR_TRYLOCK_READERS; i++) {
+ printk("starting trylock reader thread %lu\n", i);
+ trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
+ (void *)i, "frwlock_trylock_reader");
+ BUG_ON(!trylock_reader_threads[i]);
+ }
+ for (i = 0; i < NR_INTERRUPT_READERS; i++) {
+ printk("starting interrupt reader %lu\n", i);
+ interrupt_reader[i] = kthread_run(interrupt_reader_thread,
+ (void *)i,
+ "frwlock_interrupt_reader");
+ }
+ for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
+ printk("starting trylock interrupt reader %lu\n", i);
+ trylock_interrupt_reader[i] =
+ kthread_run(trylock_interrupt_reader_thread,
+ (void *)i, "frwlock_trylock_interrupt_reader");
+ }
for (i = 0; i < NR_WRITERS; i++) {
printk("starting writer thread %lu\n", i);
writer_threads[i] = kthread_run(writer_thread, (void *)i,
"frwlock_writer");
BUG_ON(!writer_threads[i]);
}
+ for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
+ printk("starting trylock writer thread %lu\n", i);
+ trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
+ (void *)i, "frwlock_trylock_writer");
+ BUG_ON(!trylock_writer_threads[i]);
+ }
}
static void fair_rwlock_stop(void)
{
unsigned long i;
- for (i = 0; i < NR_WRITERS; i++) {
+ for (i = 0; i < NR_WRITERS; i++)
kthread_stop(writer_threads[i]);
- }
-
- for (i = 0; i < NR_READERS; i++) {
+ for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
+ kthread_stop(trylock_writer_threads[i]);
+ for (i = 0; i < NR_READERS; i++)
kthread_stop(reader_threads[i]);
- }
-
- kthread_stop(interrupt_reader);
+ for (i = 0; i < NR_TRYLOCK_READERS; i++)
+ kthread_stop(trylock_reader_threads[i]);
+ for (i = 0; i < NR_INTERRUPT_READERS; i++)
+ kthread_stop(interrupt_reader[i]);
+ for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
+ kthread_stop(trylock_interrupt_reader[i]);
}
static int my_open(struct inode *inode, struct file *file)
{
+ unsigned long i;
+ cycles_t time1, time2, delay;
+
+ printk("** get_cycles calibration **\n");
+ cycles_calibration_min = ULLONG_MAX;
+ cycles_calibration_avg = 0;
+ cycles_calibration_max = 0;
+
+ local_irq_disable();
+ for (i = 0; i < 10; i++) {
+ rdtsc_barrier();
+ time1 = get_cycles();
+ rdtsc_barrier();
+ rdtsc_barrier();
+ time2 = get_cycles();
+ rdtsc_barrier();
+ delay = time2 - time1;
+ cycles_calibration_min = min(cycles_calibration_min, delay);
+ cycles_calibration_avg += delay;
+ cycles_calibration_max = max(cycles_calibration_max, delay);
+ }
+ cycles_calibration_avg /= 10;
+ local_irq_enable();
+
+ printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
+ "results calibrated on avg\n",
+ cycles_calibration_min,
+ cycles_calibration_avg,
+ cycles_calibration_max);
+
+ printk("** Single writer test, no contention **\n");
+ writer_threads[0] = kthread_run(writer_thread, (void *)0,
+ "frwlock_writer");
+ BUG_ON(!writer_threads[0]);
+ ssleep(SINGLE_WRITER_TEST_DURATION);
+ kthread_stop(writer_threads[0]);
+
+	/*
+	 * Use a local task_struct pointer here : trylock_writer_threads[] has
+	 * NR_TRYLOCK_WRITERS (currently 0) entries, so indexing element 0
+	 * would write out of bounds of a zero-length array.
+	 */
+	{
+		struct task_struct *tw;
+
+		printk("** Single trylock writer test, no contention **\n");
+		tw = kthread_run(trylock_writer_thread, (void *)0,
+				 "trylock_frwlock_writer");
+		BUG_ON(!tw);
+		ssleep(SINGLE_WRITER_TEST_DURATION);
+		kthread_stop(tw);
+	}
+
+ printk("** Single reader test, no contention **\n");
+ reader_threads[0] = kthread_run(reader_thread, (void *)0,
+ "frwlock_reader");
+ BUG_ON(!reader_threads[0]);
+ ssleep(SINGLE_READER_TEST_DURATION);
+ kthread_stop(reader_threads[0]);
+
+ printk("** Multiple readers test, no contention **\n");
+ for (i = 0; i < NR_READERS; i++) {
+ printk("starting reader thread %lu\n", i);
+ reader_threads[i] = kthread_run(reader_thread, (void *)i,
+ "frwlock_reader");
+ BUG_ON(!reader_threads[i]);
+ }
+ ssleep(SINGLE_READER_TEST_DURATION);
+ for (i = 0; i < NR_READERS; i++)
+ kthread_stop(reader_threads[i]);
+
+ printk("** High contention test **\n");
perform_test("fair-rwlock-create", fair_rwlock_create);
ssleep(TEST_DURATION);
perform_test("fair-rwlock-stop", fair_rwlock_stop);