5 #include <linux/module.h>
6 #include <linux/proc_fs.h>
7 #include <linux/sched.h>
8 #include <linux/timex.h>
9 #include <linux/kthread.h>
10 #include <linux/delay.h>
11 #include <linux/hardirq.h>
12 #include <linux/module.h>
13 #include <linux/percpu.h>
14 #include <linux/spinlock.h>
15 #include <asm/ptrace.h>
16 #include <linux/wbias-rwlock.h>
/* Test with no contention duration, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* Test duration, in seconds */
#define TEST_DURATION 60

/*
 * NOTE(review): the NR_VARS and NR_WRITERS definitions were lost when this
 * file was mangled; the values below are reconstructed — confirm against
 * the original source.
 */
#define NR_VARS 100
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
#define NR_NPREADERS 2
#define NR_TRYLOCK_READERS 1

/*
 * 1 : test standard rwlock
 * 0 : test wbiasrwlock
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * 1 : test with thread preemption readers.
 * 0 : test only with non-preemptable thread readers.
 */
#define TEST_PREEMPT 1

/* NOTE(review): NR_PREADERS definition reconstructed — confirm. */
#if (TEST_PREEMPT)
#define NR_PREADERS 2
#else
#define NR_PREADERS 0
#endif

/*
 * Writer iteration delay, in us. 0 for busy loop. Caution : writers can
 * starve readers if this is too low.
 * NOTE(review): the tail of this comment was lost in extraction.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of iterations after which a trylock writer fails.
 * -1 for infinite loop.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0	/* busy loop */
#define INTERRUPT_READER_DELAY 100
82 static int var
[NR_VARS
];
83 static struct task_struct
*preader_threads
[NR_PREADERS
];
84 static struct task_struct
*npreader_threads
[NR_NPREADERS
];
85 static struct task_struct
*trylock_reader_threads
[NR_TRYLOCK_READERS
];
86 static struct task_struct
*writer_threads
[NR_WRITERS
];
87 static struct task_struct
*trylock_writer_threads
[NR_TRYLOCK_WRITERS
];
88 static struct task_struct
*interrupt_reader
[NR_INTERRUPT_READERS
];
89 static struct task_struct
*trylock_interrupt_reader
[NR_TRYLOCK_INTERRUPT_READERS
];
93 static DEFINE_RWLOCK(std_rw_lock
);
95 #define wrap_read_lock() read_lock(&std_rw_lock)
96 #define wrap_read_trylock() read_trylock(&std_rw_lock)
97 #define wrap_read_unlock() read_unlock(&std_rw_lock)
99 #define wrap_read_lock_inatomic() read_lock(&std_rw_lock)
100 #define wrap_read_trylock_inatomic() read_trylock(&std_rw_lock)
101 #define wrap_read_unlock_inatomic() read_unlock(&std_rw_lock)
103 #define wrap_read_lock_irq() read_lock(&std_rw_lock)
104 #define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
105 #define wrap_read_unlock_irq() read_unlock(&std_rw_lock)
107 #if (TEST_INTERRUPTS)
108 #define wrap_write_lock() write_lock_irq(&std_rw_lock)
109 #define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
111 #define wrap_write_lock() write_lock(&std_rw_lock)
112 #define wrap_write_unlock() write_unlock(&std_rw_lock)
117 #if (TEST_INTERRUPTS)
119 #define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RIRQ | BW_RNPTHREAD | BW_RPTHREAD)
121 #define WBIASRWLOCKMASK (BW_WNPTHREAD | BW_RIRQ | BW_RNPTHREAD)
125 #define WBIASRWLOCKMASK (BW_WPTHREAD | BW_RNPTHREAD | BW_RPTHREAD)
127 #define WBIASRWLOCKMASK (BW_WNPTHREAD | BW_RNPTHREAD)
130 static DEFINE_WBIAS_RWLOCK(wbiasrwlock
, WBIASRWLOCKMASK
);
131 CHECK_WBIAS_RWLOCK_MAP(wbiasrwlock
, WBIASRWLOCKMASK
);
135 #define wrap_read_lock() wbias_read_lock(&wbiasrwlock)
136 #define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
137 #define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)
139 #define wrap_read_lock() wbias_read_lock_inatomic(&wbiasrwlock)
140 #define wrap_read_trylock() wbias_read_trylock_inatomic(&wbiasrwlock)
141 #define wrap_read_unlock() wbias_read_unlock_inatomic(&wbiasrwlock)
144 #define wrap_read_lock_inatomic() wbias_read_lock_inatomic(&wbiasrwlock)
145 #define wrap_read_trylock_inatomic() \
146 wbias_read_trylock_inatomic(&wbiasrwlock)
147 #define wrap_read_unlock_inatomic() \
148 wbias_read_unlock_inatomic(&wbiasrwlock)
150 #define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
151 #define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)
152 #define wrap_read_unlock_irq() wbias_read_unlock_irq(&wbiasrwlock)
154 #define wrap_write_lock() \
155 wbias_write_lock(&wbiasrwlock, WBIASRWLOCKMASK)
156 #define wrap_write_unlock() \
157 wbias_write_unlock(&wbiasrwlock, WBIASRWLOCKMASK)
158 #define wrap_write_trylock_else_subscribe() \
159 wbias_write_trylock_else_subscribe(&wbiasrwlock, WBIASRWLOCKMASK)
160 #define wrap_write_trylock_subscribed() \
161 wbias_write_trylock_subscribed(&wbiasrwlock, WBIASRWLOCKMASK)
162 #define wrap_write_unsubscribe() \
163 wbias_write_unsubscribe(&wbiasrwlock, WBIASRWLOCKMASK)
167 static cycles_t cycles_calibration_min
,
168 cycles_calibration_avg
,
169 cycles_calibration_max
;
171 static inline cycles_t
calibrate_cycles(cycles_t cycles
)
173 return cycles
- cycles_calibration_avg
;
176 struct proc_dir_entry
*pentry
= NULL
;
178 static int p_or_np_reader_thread(const char *typename
,
179 void *data
, int preemptable
)
183 unsigned long iter
= 0;
184 cycles_t time1
, time2
, delay
;
185 cycles_t ldelaymax
= 0, ldelaymin
= ULLONG_MAX
, ldelayavg
= 0;
186 cycles_t udelaymax
= 0, udelaymin
= ULLONG_MAX
, udelayavg
= 0;
188 printk("%s/%lu runnning\n", typename
, (unsigned long)data
);
194 time1
= get_cycles();
198 wrap_read_lock_inatomic();
203 time2
= get_cycles();
205 delay
= time2
- time1
;
206 ldelaymax
= max(ldelaymax
, delay
);
207 ldelaymin
= min(ldelaymin
, delay
);
210 for (i
= 1; i
< NR_VARS
; i
++) {
214 "Unequal cur %d/prev %d at i %d, iter %lu "
215 "in thread\n", cur
, prev
, i
, iter
);
219 time1
= get_cycles();
223 wrap_read_unlock_inatomic();
227 time2
= get_cycles();
229 delay
= time2
- time1
;
230 udelaymax
= max(udelaymax
, delay
);
231 udelaymin
= min(udelaymin
, delay
);
237 if (THREAD_READER_DELAY
)
238 msleep(THREAD_READER_DELAY
);
239 } while (!kthread_should_stop());
241 printk("%s/%lu iterations : %lu", typename
,
242 (unsigned long)data
, iter
);
246 printk("%s/%lu iterations : %lu, "
247 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
249 (unsigned long)data
, iter
,
250 calibrate_cycles(ldelaymin
),
251 calibrate_cycles(ldelayavg
),
252 calibrate_cycles(ldelaymax
));
253 printk("%s/%lu iterations : %lu, "
254 "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
256 (unsigned long)data
, iter
,
257 calibrate_cycles(udelaymin
),
258 calibrate_cycles(udelayavg
),
259 calibrate_cycles(udelaymax
));
/* Entry point for preemptable reader threads. */
static int preader_thread(void *data)
{
	return p_or_np_reader_thread("preader_thread", data, 1);
}
/* Entry point for non-preemptable reader threads. */
static int npreader_thread(void *data)
{
	return p_or_np_reader_thread("npreader_thread", data, 0);
}
274 static int trylock_reader_thread(void *data
)
278 unsigned long iter
= 0, success_iter
= 0;
280 printk("trylock_reader_thread/%lu runnning\n", (unsigned long)data
);
285 while (!wrap_read_trylock())
289 for (i
= 1; i
< NR_VARS
; i
++) {
293 "Unequal cur %d/prev %d at i %d, iter %lu "
294 "in thread\n", cur
, prev
, i
, iter
);
300 if (THREAD_READER_DELAY
)
301 msleep(THREAD_READER_DELAY
);
302 } while (!kthread_should_stop());
303 printk("trylock_reader_thread/%lu iterations : %lu, "
304 "successful iterations : %lu\n",
305 (unsigned long)data
, iter
, success_iter
);
/* Per-CPU lock/unlock latency stats gathered by interrupt_reader_ipi(). */
DEFINE_PER_CPU(cycles_t, int_ldelaymin);
DEFINE_PER_CPU(cycles_t, int_ldelayavg);
DEFINE_PER_CPU(cycles_t, int_ldelaymax);
DEFINE_PER_CPU(cycles_t, int_udelaymin);
DEFINE_PER_CPU(cycles_t, int_udelayavg);
DEFINE_PER_CPU(cycles_t, int_udelaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);
317 static void interrupt_reader_ipi(void *data
)
321 cycles_t time1
, time2
;
322 cycles_t
*ldelaymax
, *ldelaymin
, *ldelayavg
, *ipi_nr
, delay
;
323 cycles_t
*udelaymax
, *udelaymin
, *udelayavg
;
326 * Skip the ipi caller, not in irq context.
331 ldelaymax
= &per_cpu(int_ldelaymax
, smp_processor_id());
332 ldelaymin
= &per_cpu(int_ldelaymin
, smp_processor_id());
333 ldelayavg
= &per_cpu(int_ldelayavg
, smp_processor_id());
334 udelaymax
= &per_cpu(int_udelaymax
, smp_processor_id());
335 udelaymin
= &per_cpu(int_udelaymin
, smp_processor_id());
336 udelayavg
= &per_cpu(int_udelayavg
, smp_processor_id());
337 ipi_nr
= &per_cpu(int_ipi_nr
, smp_processor_id());
340 time1
= get_cycles();
343 wrap_read_lock_irq();
346 time2
= get_cycles();
348 delay
= time2
- time1
;
349 *ldelaymax
= max(*ldelaymax
, delay
);
350 *ldelaymin
= min(*ldelaymin
, delay
);
354 for (i
= 1; i
< NR_VARS
; i
++) {
358 "Unequal cur %d/prev %d at i %d in interrupt\n",
362 time1
= get_cycles();
364 wrap_read_unlock_irq();
365 time2
= get_cycles();
367 delay
= time2
- time1
;
368 *udelaymax
= max(*udelaymax
, delay
);
369 *udelaymin
= min(*udelaymin
, delay
);
373 DEFINE_PER_CPU(unsigned long, trylock_int_iter
);
374 DEFINE_PER_CPU(unsigned long, trylock_int_success
);
376 static void trylock_interrupt_reader_ipi(void *data
)
382 * Skip the ipi caller, not in irq context.
387 per_cpu(trylock_int_iter
, smp_processor_id())++;
388 while (!wrap_read_trylock_irq())
389 per_cpu(trylock_int_iter
, smp_processor_id())++;
390 per_cpu(trylock_int_success
, smp_processor_id())++;
392 for (i
= 1; i
< NR_VARS
; i
++) {
396 "Unequal cur %d/prev %d at i %d in interrupt\n",
399 wrap_read_unlock_irq();
403 static int interrupt_reader_thread(void *data
)
405 unsigned long iter
= 0;
408 for_each_online_cpu(i
) {
409 per_cpu(int_ldelaymax
, i
) = 0;
410 per_cpu(int_ldelaymin
, i
) = ULLONG_MAX
;
411 per_cpu(int_ldelayavg
, i
) = 0;
412 per_cpu(int_udelaymax
, i
) = 0;
413 per_cpu(int_udelaymin
, i
) = ULLONG_MAX
;
414 per_cpu(int_udelayavg
, i
) = 0;
415 per_cpu(int_ipi_nr
, i
) = 0;
419 on_each_cpu(interrupt_reader_ipi
, NULL
, 0);
420 if (INTERRUPT_READER_DELAY
)
421 msleep(INTERRUPT_READER_DELAY
);
422 } while (!kthread_should_stop());
423 printk("interrupt_reader_thread/%lu iterations : %lu\n",
424 (unsigned long)data
, iter
);
425 for_each_online_cpu(i
) {
426 if (!per_cpu(int_ipi_nr
, i
))
428 per_cpu(int_ldelayavg
, i
) /= per_cpu(int_ipi_nr
, i
);
429 per_cpu(int_udelayavg
, i
) /= per_cpu(int_ipi_nr
, i
);
430 printk("interrupt readers on CPU %i, "
431 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
433 calibrate_cycles(per_cpu(int_ldelaymin
, i
)),
434 calibrate_cycles(per_cpu(int_ldelayavg
, i
)),
435 calibrate_cycles(per_cpu(int_ldelaymax
, i
)));
436 printk("interrupt readers on CPU %i, "
437 "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
439 calibrate_cycles(per_cpu(int_udelaymin
, i
)),
440 calibrate_cycles(per_cpu(int_udelayavg
, i
)),
441 calibrate_cycles(per_cpu(int_udelaymax
, i
)));
446 static int trylock_interrupt_reader_thread(void *data
)
448 unsigned long iter
= 0;
453 on_each_cpu(trylock_interrupt_reader_ipi
, NULL
, 0);
454 if (INTERRUPT_READER_DELAY
)
455 msleep(INTERRUPT_READER_DELAY
);
456 } while (!kthread_should_stop());
457 printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
458 (unsigned long)data
, iter
);
459 for_each_online_cpu(i
) {
460 printk("trylock interrupt readers on CPU %i, "
462 "successful iterations : %lu\n",
463 i
, per_cpu(trylock_int_iter
, i
),
464 per_cpu(trylock_int_success
, i
));
465 per_cpu(trylock_int_iter
, i
) = 0;
466 per_cpu(trylock_int_success
, i
) = 0;
471 static int writer_thread(void *data
)
475 unsigned long iter
= 0;
476 cycles_t time1
, time2
, delay
;
477 cycles_t ldelaymax
= 0, ldelaymin
= ULLONG_MAX
, ldelayavg
= 0;
478 cycles_t udelaymax
= 0, udelaymin
= ULLONG_MAX
, udelayavg
= 0;
480 printk("writer_thread/%lu runnning\n", (unsigned long)data
);
487 time1
= get_cycles();
493 time2
= get_cycles();
495 delay
= time2
- time1
;
496 ldelaymax
= max(ldelaymax
, delay
);
497 ldelaymin
= min(ldelaymin
, delay
);
499 new = (int)get_cycles();
500 for (i
= 0; i
< NR_VARS
; i
++) {
505 time1
= get_cycles();
511 time2
= get_cycles();
513 delay
= time2
- time1
;
514 udelaymax
= max(udelaymax
, delay
);
515 udelaymin
= min(udelaymin
, delay
);
521 if (WRITER_DELAY
> 0)
522 udelay(WRITER_DELAY
);
524 * make sure we don't busy-loop faster than
525 * the lock busy-loop, it would cause reader and
528 } while (!kthread_should_stop());
531 printk("writer_thread/%lu iterations : %lu, "
532 "lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
533 (unsigned long)data
, iter
,
534 calibrate_cycles(ldelaymin
),
535 calibrate_cycles(ldelayavg
),
536 calibrate_cycles(ldelaymax
));
537 printk("writer_thread/%lu iterations : %lu, "
538 "unlock delay [min,avg,max] %llu,%llu,%llu cycles\n",
539 (unsigned long)data
, iter
,
540 calibrate_cycles(udelaymin
),
541 calibrate_cycles(udelayavg
),
542 calibrate_cycles(udelaymax
));
546 #if (TEST_STD_RWLOCK)
547 static int trylock_writer_thread(void *data
)
551 unsigned long iter
= 0, success
= 0, fail
= 0;
553 printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data
);
555 #if (TEST_INTERRUPTS)
556 /* std write trylock cannot disable interrupts. */
560 #if (TRYLOCK_WRITERS_FAIL_ITER == -1)
563 if (write_trylock(&std_rw_lock
))
567 for (i
= 0; i
< TRYLOCK_WRITERS_FAIL_ITER
; i
++) {
569 if (write_trylock(&std_rw_lock
))
574 #if (TEST_INTERRUPTS)
580 new = (int)get_cycles();
581 for (i
= 0; i
< NR_VARS
; i
++) {
584 #if (TEST_INTERRUPTS)
585 write_unlock_irq(&std_rw_lock
);
587 write_unlock(&std_rw_lock
);
590 if (TRYLOCK_WRITER_DELAY
> 0)
591 udelay(TRYLOCK_WRITER_DELAY
);
593 * make sure we don't busy-loop faster than
594 * the lock busy-loop, it would cause reader and
597 } while (!kthread_should_stop());
598 printk("trylock_writer_thread/%lu iterations : "
599 "[try,success,fail after %d try], "
601 (unsigned long)data
, TRYLOCK_WRITERS_FAIL_ITER
,
602 iter
, success
, fail
);
606 #else /* !TEST_STD_RWLOCK */
608 static int trylock_writer_thread(void *data
)
612 unsigned long iter
= 0, success
= 0, fail
= 0;
614 printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data
);
620 if (wrap_write_trylock_else_subscribe())
623 #if (TRYLOCK_WRITERS_FAIL_ITER == -1)
626 if (wrap_write_trylock_subscribed())
630 for (i
= 0; i
< TRYLOCK_WRITERS_FAIL_ITER
- 1; i
++) {
632 if (wrap_write_trylock_subscribed())
637 wrap_write_unsubscribe();
641 new = (int)get_cycles();
642 for (i
= 0; i
< NR_VARS
; i
++) {
650 if (TRYLOCK_WRITER_DELAY
> 0)
651 udelay(TRYLOCK_WRITER_DELAY
);
653 * make sure we don't busy-loop faster than
654 * the lock busy-loop, it would cause reader and
657 } while (!kthread_should_stop());
658 printk("trylock_writer_thread/%lu iterations : "
659 "[try,success,fail after %d try], "
661 (unsigned long)data
, TRYLOCK_WRITERS_FAIL_ITER
,
662 iter
, success
, fail
);
666 #endif /* TEST_STD_RWLOCK */
668 static void wbias_rwlock_create(void)
672 for (i
= 0; i
< NR_PREADERS
; i
++) {
673 printk("starting preemptable reader thread %lu\n", i
);
674 preader_threads
[i
] = kthread_run(preader_thread
, (void *)i
,
675 "wbiasrwlock_preader");
676 BUG_ON(!preader_threads
[i
]);
679 for (i
= 0; i
< NR_NPREADERS
; i
++) {
680 printk("starting non-preemptable reader thread %lu\n", i
);
681 npreader_threads
[i
] = kthread_run(npreader_thread
, (void *)i
,
682 "wbiasrwlock_npreader");
683 BUG_ON(!npreader_threads
[i
]);
686 for (i
= 0; i
< NR_TRYLOCK_READERS
; i
++) {
687 printk("starting trylock reader thread %lu\n", i
);
688 trylock_reader_threads
[i
] = kthread_run(trylock_reader_thread
,
689 (void *)i
, "wbiasrwlock_trylock_reader");
690 BUG_ON(!trylock_reader_threads
[i
]);
692 for (i
= 0; i
< NR_INTERRUPT_READERS
; i
++) {
693 printk("starting interrupt reader %lu\n", i
);
694 interrupt_reader
[i
] = kthread_run(interrupt_reader_thread
,
696 "wbiasrwlock_interrupt_reader");
698 for (i
= 0; i
< NR_TRYLOCK_INTERRUPT_READERS
; i
++) {
699 printk("starting trylock interrupt reader %lu\n", i
);
700 trylock_interrupt_reader
[i
] =
701 kthread_run(trylock_interrupt_reader_thread
,
702 (void *)i
, "wbiasrwlock_trylock_interrupt_reader");
704 for (i
= 0; i
< NR_WRITERS
; i
++) {
705 printk("starting writer thread %lu\n", i
);
706 writer_threads
[i
] = kthread_run(writer_thread
, (void *)i
,
707 "wbiasrwlock_writer");
708 BUG_ON(!writer_threads
[i
]);
710 for (i
= 0; i
< NR_TRYLOCK_WRITERS
; i
++) {
711 printk("starting trylock writer thread %lu\n", i
);
712 trylock_writer_threads
[i
] = kthread_run(trylock_writer_thread
,
713 (void *)i
, "wbiasrwlock_trylock_writer");
714 BUG_ON(!trylock_writer_threads
[i
]);
718 static void wbias_rwlock_stop(void)
722 for (i
= 0; i
< NR_WRITERS
; i
++)
723 kthread_stop(writer_threads
[i
]);
724 for (i
= 0; i
< NR_TRYLOCK_WRITERS
; i
++)
725 kthread_stop(trylock_writer_threads
[i
]);
726 for (i
= 0; i
< NR_NPREADERS
; i
++)
727 kthread_stop(npreader_threads
[i
]);
728 for (i
= 0; i
< NR_PREADERS
; i
++)
729 kthread_stop(preader_threads
[i
]);
730 for (i
= 0; i
< NR_TRYLOCK_READERS
; i
++)
731 kthread_stop(trylock_reader_threads
[i
]);
732 for (i
= 0; i
< NR_INTERRUPT_READERS
; i
++)
733 kthread_stop(interrupt_reader
[i
]);
734 for (i
= 0; i
< NR_TRYLOCK_INTERRUPT_READERS
; i
++)
735 kthread_stop(trylock_interrupt_reader
[i
]);
/*
 * Run @callback, printing @name first so the test phases can be told apart
 * in the kernel log.
 * NOTE(review): the callback invocation line was lost in extraction and has
 * been reconstructed — confirm against the original source.
 */
static void perform_test(const char *name, void (*callback)(void))
{
	printk("%s\n", name);
	callback();
}
745 static int my_open(struct inode
*inode
, struct file
*file
)
748 cycles_t time1
, time2
, delay
;
750 printk("** get_cycles calibration **\n");
751 cycles_calibration_min
= ULLONG_MAX
;
752 cycles_calibration_avg
= 0;
753 cycles_calibration_max
= 0;
756 for (i
= 0; i
< 10; i
++) {
758 time1
= get_cycles();
761 time2
= get_cycles();
763 delay
= time2
- time1
;
764 cycles_calibration_min
= min(cycles_calibration_min
, delay
);
765 cycles_calibration_avg
+= delay
;
766 cycles_calibration_max
= max(cycles_calibration_max
, delay
);
768 cycles_calibration_avg
/= 10;
771 printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
772 "results calibrated on avg\n",
773 cycles_calibration_min
,
774 cycles_calibration_avg
,
775 cycles_calibration_max
);
778 printk("** Single writer test, no contention **\n");
779 wbias_rwlock_profile_latency_reset();
780 writer_threads
[0] = kthread_run(writer_thread
, (void *)0,
781 "wbiasrwlock_writer");
782 BUG_ON(!writer_threads
[0]);
783 ssleep(SINGLE_WRITER_TEST_DURATION
);
784 kthread_stop(writer_threads
[0]);
787 wbias_rwlock_profile_latency_print();
789 printk("** Single trylock writer test, no contention **\n");
790 wbias_rwlock_profile_latency_reset();
791 trylock_writer_threads
[0] = kthread_run(trylock_writer_thread
,
793 "trylock_wbiasrwlock_writer");
794 BUG_ON(!trylock_writer_threads
[0]);
795 ssleep(SINGLE_WRITER_TEST_DURATION
);
796 kthread_stop(trylock_writer_threads
[0]);
799 wbias_rwlock_profile_latency_print();
801 printk("** Single preemptable reader test, no contention **\n");
802 wbias_rwlock_profile_latency_reset();
803 preader_threads
[0] = kthread_run(preader_thread
, (void *)0,
804 "wbiasrwlock_preader");
805 BUG_ON(!preader_threads
[0]);
806 ssleep(SINGLE_READER_TEST_DURATION
);
807 kthread_stop(preader_threads
[0]);
810 wbias_rwlock_profile_latency_print();
813 printk("** Single non-preemptable reader test, no contention **\n");
814 wbias_rwlock_profile_latency_reset();
815 npreader_threads
[0] = kthread_run(npreader_thread
, (void *)0,
816 "wbiasrwlock_npreader");
817 BUG_ON(!npreader_threads
[0]);
818 ssleep(SINGLE_READER_TEST_DURATION
);
819 kthread_stop(npreader_threads
[0]);
822 wbias_rwlock_profile_latency_print();
825 printk("** Multiple p/non-p readers test, no contention **\n");
826 wbias_rwlock_profile_latency_reset();
828 for (i
= 0; i
< NR_PREADERS
; i
++) {
829 printk("starting preader thread %lu\n", i
);
830 preader_threads
[i
] = kthread_run(preader_thread
, (void *)i
,
831 "wbiasrwlock_preader");
832 BUG_ON(!preader_threads
[i
]);
835 for (i
= 0; i
< NR_NPREADERS
; i
++) {
836 printk("starting npreader thread %lu\n", i
);
837 npreader_threads
[i
] = kthread_run(npreader_thread
, (void *)i
,
838 "wbiasrwlock_npreader");
839 BUG_ON(!npreader_threads
[i
]);
841 ssleep(SINGLE_READER_TEST_DURATION
);
842 for (i
= 0; i
< NR_NPREADERS
; i
++)
843 kthread_stop(npreader_threads
[i
]);
845 for (i
= 0; i
< NR_PREADERS
; i
++)
846 kthread_stop(preader_threads
[i
]);
850 wbias_rwlock_profile_latency_print();
852 printk("** High contention test **\n");
853 wbias_rwlock_profile_latency_reset();
854 perform_test("wbias-rwlock-create", wbias_rwlock_create
);
855 ssleep(TEST_DURATION
);
856 perform_test("wbias-rwlock-stop", wbias_rwlock_stop
);
858 wbias_rwlock_profile_latency_print();
864 static struct file_operations my_operations
= {
868 int init_module(void)
870 pentry
= create_proc_entry("testwbiasrwlock", 0444, NULL
);
872 pentry
->proc_fops
= &my_operations
;
874 printk("PTHREAD_ROFFSET : %016lX\n", PTHREAD_ROFFSET
);
875 printk("PTHREAD_RMASK : %016lX\n", PTHREAD_RMASK
);
876 printk("NPTHREAD_ROFFSET : %016lX\n", NPTHREAD_ROFFSET
);
877 printk("NPTHREAD_RMASK : %016lX\n", NPTHREAD_RMASK
);
878 printk("SOFTIRQ_ROFFSET : %016lX\n", SOFTIRQ_ROFFSET
);
879 printk("SOFTIRQ_RMASK : %016lX\n", SOFTIRQ_RMASK
);
880 printk("HARDIRQ_ROFFSET : %016lX\n", HARDIRQ_ROFFSET
);
881 printk("HARDIRQ_RMASK : %016lX\n", HARDIRQ_RMASK
);
882 printk("PTHREAD_WOFFSET : %016lX\n", PTHREAD_WOFFSET
);
883 printk("PTHREAD_WMASK : %016lX\n", PTHREAD_WMASK
);
884 printk("NPTHREAD_WOFFSET : %016lX\n", NPTHREAD_WOFFSET
);
885 printk("NPTHREAD_WMASK : %016lX\n", NPTHREAD_WMASK
);
886 printk("WRITER_MUTEX : %016lX\n", WRITER_MUTEX
);
887 printk("SOFTIRQ_WMASK : %016lX\n", SOFTIRQ_WMASK
);
888 printk("HARDIRQ_WMASK : %016lX\n", HARDIRQ_WMASK
);
889 printk("WQ_MUTEX : %016lX\n", WQ_MUTEX
);
890 printk("WQ_ACTIVE : %016lX\n", WQ_ACTIVE
);
895 void cleanup_module(void)
897 remove_proc_entry("testwbiasrwlock", NULL
);
900 MODULE_LICENSE("GPL");
901 MODULE_AUTHOR("Mathieu Desnoyers");
902 MODULE_DESCRIPTION("wbias rwlock test");