/* True when the generation snapshot taken by reader 0 equals the
 * generation that was last freed — presumably checked by a never claim
 * as the "reader still uses freed data" error condition (TODO confirm
 * against the claim files that include these DEFINES). */
2 #define read_free_race (read_generation[0] == last_free_gen)
/* True when a free has completed while reader 0 is inside its
 * data-access window (data_access[0] set between the cached reads in
 * the read-side critical section). */
3 #define read_free (free_done && data_access[0])
6 //#define TEST_SIGNAL_ON_READ
7 //#define TEST_SIGNAL_ON_WRITE
/* Grace-period phase bit of the global counter; the writer toggles it
 * with XOR (see urcu_writer) to start each new grace-period phase. */
9 #define RCU_GP_CTR_BIT (1 << 7)
/* Low 7 bits of the counter: the reader nesting-depth field tested by
 * readers to detect the outermost rcu_read_lock. */
10 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
12 #ifndef READER_NEST_LEVEL
13 #define READER_NEST_LEVEL 1
14 //#define READER_NEST_LEVEL 2
17 #define REMOTE_BARRIERS
19 * mem.spin: Promela code to validate memory barriers with OOO memory.
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the GNU General Public License as published by
23 * the Free Software Foundation; either version 2 of the License, or
24 * (at your option) any later version.
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
31 * You should have received a copy of the GNU General Public License
32 * along with this program; if not, write to the Free Software
33 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 * Copyright (c) 2009 Mathieu Desnoyers
38 /* Promela validation variables. */
40 /* specific defines "included" here */
41 /* DEFINES file "included" here */
44 /* All signal readers have the same PID and use the same reader variable */
44 #ifdef TEST_SIGNAL_ON_WRITE
46 #define NR_READERS 1 /* the writer is also a signal reader */
53 #elif defined(TEST_SIGNAL_ON_READ)
/* Promela conditional expression (cond -> then : else): Promela
 * processes 0 and 1 share PID 0 (the signal readers), every other
 * process maps to PID 1. */
55 #define get_pid() ((_pid < 2) -> 0 : 1)
64 #define get_pid() (_pid)
/* Reader identifier is simply the process id in every configuration. */
73 #define get_readerid() (get_pid())
76 * Each process has its own data in cache. Caches are randomly updated.
77 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
81 typedef per_proc_byte {
85 /* Bitfield has a maximum of 8 procs */
86 typedef per_proc_bit {
90 #define DECLARE_CACHED_VAR(type, x) \
92 per_proc_##type cached_##x; \
93 per_proc_bit cache_dirty_##x;
95 #define INIT_CACHED_VAR(x, v, j) \
97 cache_dirty_##x.bitfield = 0; \
101 cached_##x.val[j] = v; \
103 :: j >= NR_PROCS -> break \
/* Nonzero when process `id` has a write to x not yet flushed to the
 * modeled main memory (per-process dirty bit in the bitfield). */
106 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
/* Read x from the calling process's local cache copy (never directly
 * from mem_x; cache updates are modeled separately). */
108 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
110 #define WRITE_CACHED_VAR(x, v) \
112 cached_##x.val[get_pid()] = v; \
113 cache_dirty_##x.bitfield = \
114 cache_dirty_##x.bitfield | (1 << get_pid()); \
117 #define CACHE_WRITE_TO_MEM(x, id) \
119 :: IS_CACHE_DIRTY(x, id) -> \
120 mem_##x = cached_##x.val[id]; \
121 cache_dirty_##x.bitfield = \
122 cache_dirty_##x.bitfield & (~(1 << id)); \
127 #define CACHE_READ_FROM_MEM(x, id) \
129 :: !IS_CACHE_DIRTY(x, id) -> \
130 cached_##x.val[id] = mem_##x;\
136 * May update other caches if cache is dirty, or not.
138 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
140 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
144 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
146 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
151 * Remote barriers test the scheme where a signal (or IPI) is sent to all
152 * reader threads to promote their compiler barrier to a smp_mb().
154 #ifdef REMOTE_BARRIERS
156 inline smp_rmb_pid(i, j)
159 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
163 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
165 :: j >= NR_READERS -> break
167 CACHE_READ_FROM_MEM(generation_ptr, i);
171 inline smp_wmb_pid(i, j)
174 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
178 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
180 :: j >= NR_READERS -> break
182 CACHE_WRITE_TO_MEM(generation_ptr, i);
186 inline smp_mb_pid(i, j)
204 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
205 * signal or IPI to have all readers execute a smp_mb.
206 * We are not modeling the whole rendez-vous between readers and writers here,
207 * we just let the writer update each reader's caches remotely.
209 inline smp_mb_writer(i, j)
211 smp_mb_pid(get_pid(), j);
217 :: i >= NR_READERS -> break
219 smp_mb_pid(get_pid(), j);
222 inline smp_mb_reader(i, j)
232 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
236 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
238 :: i >= NR_READERS -> break
240 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
247 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
251 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
253 :: i >= NR_READERS -> break
255 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
276 inline smp_mb_writer(i, j)
281 inline smp_mb_reader(i, j)
288 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
289 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
290 /* Note ! currently only two readers */
291 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
292 /* pointer generation */
293 DECLARE_CACHED_VAR(byte, generation_ptr);
295 byte last_free_gen = 0;
297 byte read_generation[NR_READERS];
298 bit data_access[NR_READERS];
304 bit sighand_exec = 0;
306 inline wait_init_done()
309 :: init_done == 0 -> skip;
316 inline wait_for_sighand_exec()
320 :: sighand_exec == 0 -> skip;
325 #ifdef TOO_BIG_STATE_SPACE
326 inline wait_for_sighand_exec()
330 :: sighand_exec == 0 -> skip;
334 :: 1 -> sighand_exec = 0;
343 inline wait_for_sighand_exec()
350 #ifdef TEST_SIGNAL_ON_WRITE
351 /* Block on signal handler execution */
352 inline dispatch_sighand_write_exec()
356 :: sighand_exec == 1 ->
365 inline dispatch_sighand_write_exec()
372 #ifdef TEST_SIGNAL_ON_READ
373 /* Block on signal handler execution */
374 inline dispatch_sighand_read_exec()
378 :: sighand_exec == 1 ->
387 inline dispatch_sighand_read_exec()
398 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
402 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
405 :: i >= NR_READERS -> break
407 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
408 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
412 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
415 :: i >= NR_READERS -> break
417 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
421 inline wait_for_reader(tmp, tmp2, i, j)
425 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
427 dispatch_sighand_write_exec();
429 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
430 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
432 #ifndef GEN_ERROR_WRITER_PROGRESS
437 dispatch_sighand_write_exec();
444 inline wait_for_quiescent_state(tmp, tmp2, i, j)
448 :: tmp < NR_READERS ->
449 wait_for_reader(tmp, tmp2, i, j);
451 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
453 dispatch_sighand_write_exec();
458 :: tmp >= NR_READERS -> break
462 /* Model the RCU read-side critical section. */
464 #ifndef TEST_SIGNAL_ON_WRITE
466 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
470 :: nest_i < READER_NEST_LEVEL ->
472 dispatch_sighand_read_exec();
473 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
475 dispatch_sighand_read_exec();
477 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
479 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
481 dispatch_sighand_read_exec();
482 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
485 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
489 dispatch_sighand_read_exec();
491 :: nest_i >= READER_NEST_LEVEL -> break;
494 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
495 data_access[get_readerid()] = 1;
496 data_access[get_readerid()] = 0;
500 :: nest_i < READER_NEST_LEVEL ->
502 dispatch_sighand_read_exec();
503 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
505 dispatch_sighand_read_exec();
506 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
508 :: nest_i >= READER_NEST_LEVEL -> break;
511 //dispatch_sighand_read_exec();
512 //smp_mc(i); /* added */
515 active proctype urcu_reader()
522 assert(get_pid() < NR_PROCS);
528 * We do not test reader's progress here, because we are mainly
529 * interested in writer's progress. The reader never blocks
530 * anyway. We have to test for reader/writer's progress
531 * separately, otherwise we could think the writer is doing
532 * progress when it's blocked by an always progressing reader.
534 #ifdef READER_PROGRESS
537 urcu_one_read(i, j, nest_i, tmp, tmp2);
541 #endif //!TEST_SIGNAL_ON_WRITE
544 /* signal handler reader */
546 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
550 :: nest_i < READER_NEST_LEVEL ->
552 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
555 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
557 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
559 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
562 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
567 :: nest_i >= READER_NEST_LEVEL -> break;
570 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
571 data_access[get_readerid()] = 1;
572 data_access[get_readerid()] = 0;
576 :: nest_i < READER_NEST_LEVEL ->
578 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
580 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
582 :: nest_i >= READER_NEST_LEVEL -> break;
585 //smp_mc(i); /* added */
588 active proctype urcu_reader_sig()
595 assert(get_pid() < NR_PROCS);
600 wait_for_sighand_exec();
602 * We do not test reader's progress here, because we are mainly
603 * interested in writer's progress. The reader never blocks
604 * anyway. We have to test for reader/writer's progress
605 * separately, otherwise we could think the writer is doing
606 * progress when it's blocked by an always progressing reader.
608 #ifdef READER_PROGRESS
611 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
617 /* Model the RCU update process. */
619 active proctype urcu_writer()
627 assert(get_pid() < NR_PROCS);
630 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
631 #ifdef WRITER_PROGRESS
635 dispatch_sighand_write_exec();
637 old_gen = READ_CACHED_VAR(generation_ptr);
638 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
641 dispatch_sighand_write_exec();
647 :: write_lock == 0 ->
656 dispatch_sighand_write_exec();
657 tmp = READ_CACHED_VAR(urcu_gp_ctr);
659 dispatch_sighand_write_exec();
660 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
662 dispatch_sighand_write_exec();
664 wait_for_quiescent_state(tmp, tmp2, i, j);
668 dispatch_sighand_write_exec();
669 tmp = READ_CACHED_VAR(urcu_gp_ctr);
671 dispatch_sighand_write_exec();
672 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
675 dispatch_sighand_write_exec();
676 wait_for_quiescent_state(tmp, tmp2, i, j);
679 dispatch_sighand_write_exec();
681 /* free-up step, e.g., kfree(). */
683 last_free_gen = old_gen;
689 * Given the reader loops infinitely, let the writer also busy-loop
690 * with progress here so, with weak fairness, we can test the
696 #ifdef WRITER_PROGRESS
699 dispatch_sighand_write_exec();
703 /* Leave after the readers and writers so the pid count is ok. */
708 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
709 INIT_CACHED_VAR(generation_ptr, 0, j);
714 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
715 read_generation[i] = 1;
718 :: i >= NR_READERS -> break