2 * mem.spin: Promela code to validate memory barriers with OOO memory.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (c) 2009 Mathieu Desnoyers
21 /* Promela validation variables. */
23 /* specific defines "included" here */
24 /* DEFINES file "included" here */
27 /* All signal readers have the same PID and use the same reader variable */
27 #ifdef TEST_SIGNAL_ON_WRITE
28 #define get_pid() ((_pid < 1) -> 0 : 1)
29 #elif defined(TEST_SIGNAL_ON_READ)
30 #define get_pid() ((_pid < 2) -> 0 : 1)
32 #define get_pid() (_pid)
35 #define get_readerid() (get_pid())
38 * Each process has its own data in cache. Caches are randomly updated.
39 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
43 typedef per_proc_byte {
47 /* Bitfield has a maximum of 8 procs */
48 typedef per_proc_bit {
52 #define DECLARE_CACHED_VAR(type, x) \
54 per_proc_##type cached_##x; \
55 per_proc_bit cache_dirty_##x;
57 #define INIT_CACHED_VAR(x, v, j) \
59 cache_dirty_##x.bitfield = 0; \
63 cached_##x.val[j] = v; \
65 :: j >= NR_PROCS -> break \
68 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
70 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
72 #define WRITE_CACHED_VAR(x, v) \
74 cached_##x.val[get_pid()] = v; \
75 cache_dirty_##x.bitfield = \
76 cache_dirty_##x.bitfield | (1 << get_pid()); \
79 #define CACHE_WRITE_TO_MEM(x, id) \
81 :: IS_CACHE_DIRTY(x, id) -> \
82 mem_##x = cached_##x.val[id]; \
83 cache_dirty_##x.bitfield = \
84 cache_dirty_##x.bitfield & (~(1 << id)); \
89 #define CACHE_READ_FROM_MEM(x, id) \
91 :: !IS_CACHE_DIRTY(x, id) -> \
92 cached_##x.val[id] = mem_##x;\
98 * May update other caches if cache is dirty, or not.
100 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
102 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
106 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
108 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
113 * Remote barriers test the scheme where a signal (or IPI) is sent to all
114 * reader threads to promote their compiler barrier to a smp_mb().
116 #ifdef REMOTE_BARRIERS
118 inline smp_rmb_pid(i, j)
121 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
125 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
127 :: j >= NR_READERS -> break
129 CACHE_READ_FROM_MEM(generation_ptr, i);
133 inline smp_wmb_pid(i, j)
136 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
140 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
142 :: j >= NR_READERS -> break
144 CACHE_WRITE_TO_MEM(generation_ptr, i);
148 inline smp_mb_pid(i, j)
166 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
167 * signal or IPI to have all readers execute a smp_mb.
168 * We are not modeling the whole rendez-vous between readers and writers here,
169 * we just let the writer update each reader's caches remotely.
174 :: get_pid() >= NR_READERS ->
175 smp_mb_pid(get_pid(), j);
181 :: i >= NR_READERS -> break
183 smp_mb_pid(get_pid(), j);
193 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
197 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
199 :: i >= NR_READERS -> break
201 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
208 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
212 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
214 :: i >= NR_READERS -> break
216 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
239 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
240 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
241 /* Note! Currently only two readers are supported. */
242 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
243 /* pointer generation */
244 DECLARE_CACHED_VAR(byte, generation_ptr);
246 byte last_free_gen = 0;
248 byte read_generation[NR_READERS];
249 bit data_access[NR_READERS];
255 bit sighand_exec = 0;
257 inline wait_init_done()
260 :: init_done == 0 -> skip;
267 inline wait_for_sighand_exec()
271 :: sighand_exec == 0 -> skip;
276 #ifdef TOO_BIG_STATE_SPACE
277 inline wait_for_sighand_exec()
281 :: sighand_exec == 0 -> skip;
285 :: 1 -> sighand_exec = 0;
294 inline wait_for_sighand_exec()
301 #ifdef TEST_SIGNAL_ON_WRITE
302 /* Block on signal handler execution */
303 inline dispatch_sighand_write_exec()
307 :: sighand_exec == 1 ->
316 inline dispatch_sighand_write_exec()
323 #ifdef TEST_SIGNAL_ON_READ
324 /* Block on signal handler execution */
325 inline dispatch_sighand_read_exec()
329 :: sighand_exec == 1 ->
338 inline dispatch_sighand_read_exec()
349 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
353 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
356 :: i >= NR_READERS -> break
358 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
359 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
363 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
366 :: i >= NR_READERS -> break
368 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
372 inline wait_for_reader(tmp, tmp2, i, j)
376 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
378 dispatch_sighand_write_exec();
380 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
381 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
383 #ifndef GEN_ERROR_WRITER_PROGRESS
388 dispatch_sighand_write_exec();
395 inline wait_for_quiescent_state(tmp, tmp2, i, j)
399 :: tmp < NR_READERS ->
400 wait_for_reader(tmp, tmp2, i, j);
402 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
404 dispatch_sighand_write_exec();
409 :: tmp >= NR_READERS -> break
413 /* Model the RCU read-side critical section. */
415 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
419 :: nest_i < READER_NEST_LEVEL ->
421 dispatch_sighand_read_exec();
422 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
424 dispatch_sighand_read_exec();
426 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
428 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
430 dispatch_sighand_read_exec();
431 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
434 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
438 dispatch_sighand_read_exec();
440 :: nest_i >= READER_NEST_LEVEL -> break;
443 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
444 data_access[get_readerid()] = 1;
445 data_access[get_readerid()] = 0;
449 :: nest_i < READER_NEST_LEVEL ->
451 dispatch_sighand_read_exec();
452 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
454 dispatch_sighand_read_exec();
455 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
457 :: nest_i >= READER_NEST_LEVEL -> break;
460 //dispatch_sighand_read_exec();
461 //smp_mc(i); /* added */
464 active proctype urcu_reader()
471 assert(get_pid() < NR_PROCS);
477 * We do not test reader's progress here, because we are mainly
478 * interested in writer's progress. The reader never blocks
479 * anyway. We have to test for reader/writer's progress
480 * separately, otherwise we could think the writer is doing
481 * progress when it's blocked by an always progressing reader.
483 #ifdef READER_PROGRESS
484 /* Only test progress of one random reader. They are all the
487 :: get_readerid() == 0 ->
492 urcu_one_read(i, j, nest_i, tmp, tmp2);
497 /* signal handler reader */
499 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
503 :: nest_i < READER_NEST_LEVEL ->
505 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
508 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
510 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
512 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
515 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
520 :: nest_i >= READER_NEST_LEVEL -> break;
523 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
524 data_access[get_readerid()] = 1;
525 data_access[get_readerid()] = 0;
529 :: nest_i < READER_NEST_LEVEL ->
531 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
533 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
535 :: nest_i >= READER_NEST_LEVEL -> break;
538 //smp_mc(i); /* added */
541 active proctype urcu_reader_sig()
548 assert(get_pid() < NR_PROCS);
553 wait_for_sighand_exec();
555 * We do not test reader's progress here, because we are mainly
556 * interested in writer's progress. The reader never blocks
557 * anyway. We have to test for reader/writer's progress
558 * separately, otherwise we could think the writer is doing
559 * progress when it's blocked by an always progressing reader.
561 #ifdef READER_PROGRESS
562 /* Only test progress of one random reader. They are all the
565 :: get_readerid() == 0 ->
570 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
576 /* Model the RCU update process. */
578 active proctype urcu_writer()
586 assert(get_pid() < NR_PROCS);
589 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
590 #ifdef WRITER_PROGRESS
594 dispatch_sighand_write_exec();
596 old_gen = READ_CACHED_VAR(generation_ptr);
597 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
600 dispatch_sighand_write_exec();
606 :: write_lock == 0 ->
615 dispatch_sighand_write_exec();
616 tmp = READ_CACHED_VAR(urcu_gp_ctr);
618 dispatch_sighand_write_exec();
619 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
621 dispatch_sighand_write_exec();
623 wait_for_quiescent_state(tmp, tmp2, i, j);
627 dispatch_sighand_write_exec();
628 tmp = READ_CACHED_VAR(urcu_gp_ctr);
630 dispatch_sighand_write_exec();
631 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
634 dispatch_sighand_write_exec();
635 wait_for_quiescent_state(tmp, tmp2, i, j);
638 dispatch_sighand_write_exec();
640 /* free-up step, e.g., kfree(). */
642 last_free_gen = old_gen;
648 * Given the reader loops infinitely, let the writer also busy-loop
649 * with progress here so, with weak fairness, we can test the
655 #ifdef WRITER_PROGRESS
658 dispatch_sighand_write_exec();
662 /* Leave after the readers and writers so the pid count is ok. */
667 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
668 INIT_CACHED_VAR(generation_ptr, 0, j);
673 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
674 read_generation[i] = 1;
677 :: i >= NR_READERS -> break