/* test-fair-rwlock.c
 *
 * Latency and correctness test module for the fair rwlock primitive.
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/fair-rwlock.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>

#if (NR_CPUS > 64 && (BITS_PER_LONG == 32 || NR_CPUS > 32768))
#error "fair rwlock needs more bits per long to deal with that many CPUs"
#endif

/* Duration of the no-contention tests, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define SINGLE_READER_TEST_DURATION 10
#define MULTIPLE_READERS_TEST_DURATION 10

/* High-contention test duration, in seconds */
#define TEST_DURATION 60

#define NR_VARS 100
//#define NR_WRITERS 2
#define NR_WRITERS 2
//#define NR_TRYLOCK_WRITERS 2
#define NR_TRYLOCK_WRITERS 0
#define NR_READERS 4
//#define NR_TRYLOCK_READERS 2
#define NR_TRYLOCK_READERS 0

/*
 * 1 : test the standard rwlock
 * 0 : test the fair rwlock (frwlock)
 */
#define TEST_STD_RWLOCK 0

/*
 * 1 : test with thread and interrupt readers.
 * 0 : test only with thread readers.
 */
#define TEST_INTERRUPTS 1

#if (TEST_INTERRUPTS)
#define NR_INTERRUPT_READERS 1
#define NR_TRYLOCK_INTERRUPT_READERS 1
#else
#define NR_INTERRUPT_READERS 0
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif

/*
 * Writer iteration delay, in us. 0 for busy loop. Caution: writers can
 * starve readers.
 */
#define WRITER_DELAY 100
#define TRYLOCK_WRITER_DELAY 1000

/*
 * Number of iterations after which a trylock writer fails.
 * -1 for infinite loop.
 */
#define TRYLOCK_WRITERS_FAIL_ITER 100

/* Thread and interrupt reader delay, in ms */
#define THREAD_READER_DELAY 0	/* busy loop */
#define INTERRUPT_READER_DELAY 100

static int var[NR_VARS];
static struct task_struct *reader_threads[NR_READERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
static struct task_struct *interrupt_reader[NR_INTERRUPT_READERS];
static struct task_struct *trylock_interrupt_reader[NR_TRYLOCK_INTERRUPT_READERS];

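/*
 * The wrap_*() macros below map the test body onto either the standard
 * kernel rwlock or the fair rwlock, depending on TEST_STD_RWLOCK, so the
 * measurement and correctness code further down is lock-agnostic.  With
 * TEST_INTERRUPTS set, the write-side wrappers use the _irq variants:
 * readers also run from hard interrupt context in that configuration, so a
 * writer holding the lock must keep interrupts disabled.
 */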
#if (TEST_STD_RWLOCK)

static DEFINE_RWLOCK(std_rw_lock);

#define wrap_read_lock() read_lock(&std_rw_lock)
#define wrap_read_trylock() read_trylock(&std_rw_lock)
#define wrap_read_unlock() read_unlock(&std_rw_lock)

#define wrap_read_lock_irq() read_lock(&std_rw_lock)
#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
#define wrap_read_unlock_irq() read_unlock(&std_rw_lock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() write_lock_irq(&std_rw_lock)
#define wrap_write_unlock() write_unlock_irq(&std_rw_lock)
#else
#define wrap_write_lock() write_lock(&std_rw_lock)
#define wrap_write_unlock() write_unlock(&std_rw_lock)
#endif

#else

static struct fair_rwlock frwlock = {
	.value = ATOMIC_LONG_INIT(0),
};

#define wrap_read_lock() fair_read_lock(&frwlock)
#define wrap_read_trylock() fair_read_trylock(&frwlock)
#define wrap_read_unlock() fair_read_unlock(&frwlock)

#define wrap_read_lock_irq() fair_read_lock_irq(&frwlock)
#define wrap_read_trylock_irq() fair_read_trylock_irq(&frwlock)
#define wrap_read_unlock_irq() fair_read_unlock_irq(&frwlock)

#if (TEST_INTERRUPTS)
#define wrap_write_lock() fair_write_lock_irq(&frwlock)
#define wrap_write_unlock() fair_write_unlock_irq(&frwlock)
#else
#define wrap_write_lock() fair_write_lock(&frwlock)
#define wrap_write_unlock() fair_write_unlock(&frwlock)
#endif

#endif

static cycles_t cycles_calibration_min,
	cycles_calibration_avg,
	cycles_calibration_max;

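/*
 * Each measured lock delay also includes the cost of the timestamp reads
 * that bracket the lock operation.  my_open() measures that overhead at
 * test start (min/avg/max of back-to-back get_cycles() pairs), and
 * calibrate_cycles() subtracts the average so the reported delays reflect
 * the lock operation itself.
 */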
static inline cycles_t calibrate_cycles(cycles_t cycles)
{
	return cycles - cycles_calibration_avg;
}

struct proc_dir_entry *pentry = NULL;

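/*
 * Reader thread: repeatedly takes the read lock, timestamps the acquisition
 * with get_cycles() to collect min/avg/max lock delays, and checks that all
 * NR_VARS entries are equal while the lock is held.  Writers set every
 * entry to the same value under the write lock, so observing two different
 * values here indicates a broken read-side critical section.
 */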
static int reader_thread(void *data)
{
	int i;
	int prev, cur;
	unsigned long iter = 0;
	cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
		delayavg = 0;

	printk("reader_thread/%lu running\n", (unsigned long)data);
	do {
		iter++;
		preempt_disable();	/* for get_cycles accuracy */
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_read_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		delaymax = max(delaymax, delay);
		delaymin = min(delaymin, delay);
		delayavg += delay;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev)
				printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d, iter %lu "
				"in thread\n", cur, prev, i, iter);
		}

		wrap_read_unlock();

		preempt_enable();	/* for get_cycles accuracy */
		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
	} while (!kthread_should_stop());
	if (!iter) {
		printk("reader_thread/%lu iterations : %lu\n",
			(unsigned long)data, iter);
	} else {
		delayavg /= iter;
		printk("reader_thread/%lu iterations : %lu, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			(unsigned long)data, iter,
			calibrate_cycles(delaymin),
			calibrate_cycles(delayavg),
			calibrate_cycles(delaymax));
	}
	return 0;
}

static int trylock_reader_thread(void *data)
{
	int i;
	int prev, cur;
	unsigned long iter = 0, success_iter = 0;

	printk("trylock_reader_thread/%lu running\n", (unsigned long)data);
	do {
		while (!wrap_read_trylock())
			iter++;
		success_iter++;
		prev = var[0];
		for (i = 1; i < NR_VARS; i++) {
			cur = var[i];
			if (cur != prev)
				printk(KERN_ALERT
				"Unequal cur %d/prev %d at i %d, iter %lu "
				"in thread\n", cur, prev, i, iter);
		}
		wrap_read_unlock();
		if (THREAD_READER_DELAY)
			msleep(THREAD_READER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_reader_thread/%lu iterations : %lu, "
		"successful iterations : %lu\n",
		(unsigned long)data, iter, success_iter);
	return 0;
}

DEFINE_PER_CPU(cycles_t, int_delaymin);
DEFINE_PER_CPU(cycles_t, int_delayavg);
DEFINE_PER_CPU(cycles_t, int_delaymax);
DEFINE_PER_CPU(cycles_t, int_ipi_nr);

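/*
 * Hard-irq context reader, run on every CPU via on_each_cpu() from
 * interrupt_reader_thread().  The invocation on the issuing CPU runs in
 * thread context (not in_irq()), so it is skipped; remote CPUs execute this
 * from the IPI handler, take the read lock with the _irq wrappers, record
 * per-CPU min/avg/max lock delays and validate the shared array.
 */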
static void interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;
	cycles_t time1, time2;
	cycles_t *delaymax, *delaymin, *delayavg, *ipi_nr, delay;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	delaymax = &per_cpu(int_delaymax, smp_processor_id());
	delaymin = &per_cpu(int_delaymin, smp_processor_id());
	delayavg = &per_cpu(int_delayavg, smp_processor_id());
	ipi_nr = &per_cpu(int_ipi_nr, smp_processor_id());

	rdtsc_barrier();
	time1 = get_cycles();
	rdtsc_barrier();

	wrap_read_lock_irq();

	rdtsc_barrier();
	time2 = get_cycles();
	rdtsc_barrier();
	delay = time2 - time1;
	*delaymax = max(*delaymax, delay);
	*delaymin = min(*delaymin, delay);
	*delayavg += delay;
	(*ipi_nr)++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
			"Unequal cur %d/prev %d at i %d in interrupt\n",
			cur, prev, i);
	}
	wrap_read_unlock_irq();
}

DEFINE_PER_CPU(unsigned long, trylock_int_iter);
DEFINE_PER_CPU(unsigned long, trylock_int_success);

static void trylock_interrupt_reader_ipi(void *data)
{
	int i;
	int prev, cur;

	/*
	 * Skip the ipi caller, not in irq context.
	 */
	if (!in_irq())
		return;

	per_cpu(trylock_int_iter, smp_processor_id())++;
	while (!wrap_read_trylock_irq())
		per_cpu(trylock_int_iter, smp_processor_id())++;
	per_cpu(trylock_int_success, smp_processor_id())++;
	prev = var[0];
	for (i = 1; i < NR_VARS; i++) {
		cur = var[i];
		if (cur != prev)
			printk(KERN_ALERT
			"Unequal cur %d/prev %d at i %d in interrupt\n",
			cur, prev, i);
	}
	wrap_read_unlock_irq();
}


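/*
 * Thread-context driver for the interrupt readers: resets the per-CPU
 * statistics, then periodically fires the IPI-based reader on every online
 * CPU and, on exit, reports the per-CPU lock delay statistics.
 */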
static int interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	for_each_online_cpu(i) {
		per_cpu(int_delaymax, i) = 0;
		per_cpu(int_delaymin, i) = ULLONG_MAX;
		per_cpu(int_delayavg, i) = 0;
		per_cpu(int_ipi_nr, i) = 0;
	}
	do {
		iter++;
		on_each_cpu(interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
	} while (!kthread_should_stop());
	printk("interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		if (!per_cpu(int_ipi_nr, i))
			continue;
		per_cpu(int_delayavg, i) /= per_cpu(int_ipi_nr, i);
		printk("interrupt readers on CPU %i, "
			"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
			i,
			calibrate_cycles(per_cpu(int_delaymin, i)),
			calibrate_cycles(per_cpu(int_delayavg, i)),
			calibrate_cycles(per_cpu(int_delaymax, i)));
	}
	return 0;
}

static int trylock_interrupt_reader_thread(void *data)
{
	unsigned long iter = 0;
	int i;

	do {
		iter++;
		on_each_cpu(trylock_interrupt_reader_ipi, NULL, 0);
		if (INTERRUPT_READER_DELAY)
			msleep(INTERRUPT_READER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_interrupt_reader_thread/%lu iterations : %lu\n",
		(unsigned long)data, iter);
	for_each_online_cpu(i) {
		printk("trylock interrupt readers on CPU %i, "
			"iterations %lu, "
			"successful iterations : %lu\n",
			i, per_cpu(trylock_int_iter, i),
			per_cpu(trylock_int_success, i));
		per_cpu(trylock_int_iter, i) = 0;
		per_cpu(trylock_int_success, i) = 0;
	}
	return 0;
}

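/*
 * Writer thread: measures the write-lock acquisition delay the same way the
 * readers do, then overwrites every entry of the shared array with a single
 * new value while the write lock is held.  With TEST_INTERRUPTS set the
 * write lock also disables interrupts (see the wrap_write_lock() wrappers),
 * which is needed because readers run from hard-irq context.
 */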
static int writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0;
	cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
		delayavg = 0;

	printk("writer_thread/%lu running\n", (unsigned long)data);
	do {
		iter++;
		preempt_disable();	/* for get_cycles accuracy */
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();

		wrap_write_lock();

		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		delaymax = max(delaymax, delay);
		delaymin = min(delaymin, delay);
		delayavg += delay;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}

		wrap_write_unlock();

		preempt_enable();	/* for get_cycles accuracy */
		if (WRITER_DELAY > 0)
			udelay(WRITER_DELAY);
	} while (!kthread_should_stop());
	delayavg /= iter;
	printk("writer_thread/%lu iterations : %lu, "
		"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
		(unsigned long)data, iter,
		calibrate_cycles(delaymin),
		calibrate_cycles(delayavg),
		calibrate_cycles(delaymax));
	return 0;
}

#if (TEST_STD_RWLOCK)
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
	do {
#if (TEST_INTERRUPTS)
		/*
		 * The standard write trylock has no irq-disabling variant,
		 * so disable interrupts around the attempt by hand.
		 */
		local_irq_disable();
#endif

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER; i++) {
			iter++;
			if (write_trylock(&std_rw_lock))
				goto locked;
		}
#endif
		fail++;
#if (TEST_INTERRUPTS)
		local_irq_enable();
#endif
		goto loop;
locked:
		success++;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		write_unlock_irq(&std_rw_lock);
#else
		write_unlock(&std_rw_lock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}

#else	/* !TEST_STD_RWLOCK */

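/*
 * Fair-rwlock trylock writer.  The call sequence below follows what the
 * API names suggest (the exact semantics live in linux/fair-rwlock.h): a
 * first fair_write_trylock*_else_subscribe() attempt registers the writer
 * as a subscriber when it fails, subsequent attempts use the *_subscribed
 * variants, and a writer that gives up after TRYLOCK_WRITERS_FAIL_ITER
 * attempts must call fair_write_unsubscribe() before backing off.
 */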
static int trylock_writer_thread(void *data)
{
	int i;
	int new;
	unsigned long iter = 0, success = 0, fail = 0;

	printk("trylock_writer_thread/%lu running\n", (unsigned long)data);
	do {
		iter++;
#if (TEST_INTERRUPTS)
		if (fair_write_trylock_irq_else_subscribe(&frwlock))
#else
		if (fair_write_trylock_else_subscribe(&frwlock))
#endif
			goto locked;

#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
		for (;;) {
			iter++;
#if (TEST_INTERRUPTS)
			if (fair_write_trylock_irq_subscribed(&frwlock))
#else
			if (fair_write_trylock_subscribed(&frwlock))
#endif
				goto locked;
		}
#else
		for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
			iter++;
#if (TEST_INTERRUPTS)
			if (fair_write_trylock_irq_subscribed(&frwlock))
#else
			if (fair_write_trylock_subscribed(&frwlock))
#endif
				goto locked;
		}
#endif
		fail++;
		fair_write_unsubscribe(&frwlock);
		goto loop;
locked:
		success++;
		new = (int)get_cycles();
		for (i = 0; i < NR_VARS; i++) {
			var[i] = new;
		}
#if (TEST_INTERRUPTS)
		fair_write_unlock_irq(&frwlock);
#else
		fair_write_unlock(&frwlock);
#endif
loop:
		if (TRYLOCK_WRITER_DELAY > 0)
			udelay(TRYLOCK_WRITER_DELAY);
	} while (!kthread_should_stop());
	printk("trylock_writer_thread/%lu iterations : "
		"[try,success,fail after %d try], "
		"%lu,%lu,%lu\n",
		(unsigned long)data, TRYLOCK_WRITERS_FAIL_ITER,
		iter, success, fail);
	return 0;
}

#endif	/* TEST_STD_RWLOCK */

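/*
 * High-contention phase: spawn the configured mix of reader, trylock
 * reader, interrupt reader, writer and trylock writer threads, all
 * hammering the same lock and shared array at once.
 */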
static void fair_rwlock_create(void)
{
	unsigned long i;

	for (i = 0; i < NR_READERS; i++) {
		printk("starting reader thread %lu\n", i);
		reader_threads[i] = kthread_run(reader_thread, (void *)i,
			"frwlock_reader");
		BUG_ON(!reader_threads[i]);
	}

	for (i = 0; i < NR_TRYLOCK_READERS; i++) {
		printk("starting trylock reader thread %lu\n", i);
		trylock_reader_threads[i] = kthread_run(trylock_reader_thread,
			(void *)i, "frwlock_trylock_reader");
		BUG_ON(!trylock_reader_threads[i]);
	}
	for (i = 0; i < NR_INTERRUPT_READERS; i++) {
		printk("starting interrupt reader %lu\n", i);
		interrupt_reader[i] = kthread_run(interrupt_reader_thread,
			(void *)i,
			"frwlock_interrupt_reader");
	}
	for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++) {
		printk("starting trylock interrupt reader %lu\n", i);
		trylock_interrupt_reader[i] =
			kthread_run(trylock_interrupt_reader_thread,
			(void *)i, "frwlock_trylock_interrupt_reader");
	}
	for (i = 0; i < NR_WRITERS; i++) {
		printk("starting writer thread %lu\n", i);
		writer_threads[i] = kthread_run(writer_thread, (void *)i,
			"frwlock_writer");
		BUG_ON(!writer_threads[i]);
	}
	for (i = 0; i < NR_TRYLOCK_WRITERS; i++) {
		printk("starting trylock writer thread %lu\n", i);
		trylock_writer_threads[i] = kthread_run(trylock_writer_thread,
			(void *)i, "frwlock_trylock_writer");
		BUG_ON(!trylock_writer_threads[i]);
	}
}

static void fair_rwlock_stop(void)
{
	unsigned long i;

	for (i = 0; i < NR_WRITERS; i++)
		kthread_stop(writer_threads[i]);
	for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
		kthread_stop(trylock_writer_threads[i]);
	for (i = 0; i < NR_READERS; i++)
		kthread_stop(reader_threads[i]);
	for (i = 0; i < NR_TRYLOCK_READERS; i++)
		kthread_stop(trylock_reader_threads[i]);
	for (i = 0; i < NR_INTERRUPT_READERS; i++)
		kthread_stop(interrupt_reader[i]);
	for (i = 0; i < NR_TRYLOCK_INTERRUPT_READERS; i++)
		kthread_stop(trylock_interrupt_reader[i]);
}


static void perform_test(const char *name, void (*callback)(void))
{
	printk("%s\n", name);
	callback();
}

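/*
 * Opening /proc/testfrwlock runs the whole benchmark: get_cycles()
 * calibration, then the no-contention single writer, single trylock
 * writer, single reader and multiple readers tests, and finally the
 * high-contention mixed test.  It always returns -EPERM, so the open never
 * succeeds and each open attempt simply reruns the tests.
 */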
static int my_open(struct inode *inode, struct file *file)
{
	unsigned long i;
	cycles_t time1, time2, delay;

	printk("** get_cycles calibration **\n");
	cycles_calibration_min = ULLONG_MAX;
	cycles_calibration_avg = 0;
	cycles_calibration_max = 0;

	local_irq_disable();
	for (i = 0; i < 10; i++) {
		rdtsc_barrier();
		time1 = get_cycles();
		rdtsc_barrier();
		rdtsc_barrier();
		time2 = get_cycles();
		rdtsc_barrier();
		delay = time2 - time1;
		cycles_calibration_min = min(cycles_calibration_min, delay);
		cycles_calibration_avg += delay;
		cycles_calibration_max = max(cycles_calibration_max, delay);
	}
	cycles_calibration_avg /= 10;
	local_irq_enable();

	printk("get_cycles takes [min,avg,max] %llu,%llu,%llu cycles, "
		"results calibrated on avg\n",
		cycles_calibration_min,
		cycles_calibration_avg,
		cycles_calibration_max);

	printk("** Single writer test, no contention **\n");
	writer_threads[0] = kthread_run(writer_thread, (void *)0,
		"frwlock_writer");
	BUG_ON(!writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(writer_threads[0]);

	printk("** Single trylock writer test, no contention **\n");
	trylock_writer_threads[0] = kthread_run(trylock_writer_thread,
		(void *)0,
		"trylock_frwlock_writer");
	BUG_ON(!trylock_writer_threads[0]);
	ssleep(SINGLE_WRITER_TEST_DURATION);
	kthread_stop(trylock_writer_threads[0]);

	printk("** Single reader test, no contention **\n");
	reader_threads[0] = kthread_run(reader_thread, (void *)0,
		"frwlock_reader");
	BUG_ON(!reader_threads[0]);
	ssleep(SINGLE_READER_TEST_DURATION);
	kthread_stop(reader_threads[0]);

	printk("** Multiple readers test, no contention **\n");
	for (i = 0; i < NR_READERS; i++) {
		printk("starting reader thread %lu\n", i);
		reader_threads[i] = kthread_run(reader_thread, (void *)i,
			"frwlock_reader");
		BUG_ON(!reader_threads[i]);
	}
	ssleep(MULTIPLE_READERS_TEST_DURATION);
	for (i = 0; i < NR_READERS; i++)
		kthread_stop(reader_threads[i]);

	printk("** High contention test **\n");
	perform_test("fair-rwlock-create", fair_rwlock_create);
	ssleep(TEST_DURATION);
	perform_test("fair-rwlock-stop", fair_rwlock_stop);

	return -EPERM;
}


static struct file_operations my_operations = {
	.open = my_open,
};

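/*
 * Module init: register /proc/testfrwlock (opening it triggers my_open(),
 * which runs the tests) and dump the fair rwlock bit-layout constants
 * (THREAD_ROFFSET, SUBSCRIBERS_WOFFSET, WRITER_MUTEX, ...), which are
 * expected to be provided by linux/fair-rwlock.h.
 */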
int init_module(void)
{
	pentry = create_proc_entry("testfrwlock", 0444, NULL);
	if (pentry)
		pentry->proc_fops = &my_operations;

	printk("NR_CPUS : %d\n", NR_CPUS);
	printk("THREAD_ROFFSET : %lX\n", THREAD_ROFFSET);
	printk("THREAD_RMASK : %lX\n", THREAD_RMASK);
	printk("SOFTIRQ_ROFFSET : %lX\n", SOFTIRQ_ROFFSET);
	printk("SOFTIRQ_RMASK : %lX\n", SOFTIRQ_RMASK);
	printk("HARDIRQ_ROFFSET : %lX\n", HARDIRQ_ROFFSET);
	printk("HARDIRQ_RMASK : %lX\n", HARDIRQ_RMASK);
	printk("SUBSCRIBERS_WOFFSET : %lX\n", SUBSCRIBERS_WOFFSET);
	printk("SUBSCRIBERS_WMASK : %lX\n", SUBSCRIBERS_WMASK);
	printk("WRITER_MUTEX : %lX\n", WRITER_MUTEX);
	printk("SOFTIRQ_WMASK : %lX\n", SOFTIRQ_WMASK);
	printk("HARDIRQ_WMASK : %lX\n", HARDIRQ_WMASK);

	return 0;
}

void cleanup_module(void)
{
	remove_proc_entry("testfrwlock", NULL);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Fair rwlock test");