/* Safety properties checked by the verifier (reader 0 only).
 * read_free_race: reader 0's snapshot generation equals the generation of
 * the last freed element, i.e. it may have been reading reclaimed data.
 * read_free: reader 0 is inside a data access after the free step finished.
 * NOTE(review): free_done is declared in an elided part of this file —
 * confirm it is set by the writer's free-up step. */
2 #define read_free_race (read_generation[0] == last_free_gen)
3 #define read_free (free_done && data_access[0])
6 //#define TEST_SIGNAL_ON_READ
7 #define TEST_SIGNAL_ON_WRITE
/* Bit 7 of the global grace-period counter marks the GP phase; the writer
 * flips it with XOR (see WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT)
 * in urcu_writer). The low 7 bits hold the reader nesting count, extracted
 * with RCU_GP_CTR_NEST_MASK. */
9 #define RCU_GP_CTR_BIT (1 << 7)
10 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
12 #ifndef READER_NEST_LEVEL
13 #define READER_NEST_LEVEL 1
16 #define REMOTE_BARRIERS
18 * mem.spin: Promela code to validate memory barriers with OOO memory.
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
34 * Copyright (c) 2009 Mathieu Desnoyers
37 /* Promela validation variables. */
39 /* specific defines "included" here */
40 /* DEFINES file "included" here */
42 /* All signal readers share the same PID and use the same reader variable */
43 #ifdef TEST_SIGNAL_ON_WRITE
45 #define NR_READERS 1 /* the writer is also a signal reader */
52 #elif defined(TEST_SIGNAL_ON_READ)
54 #define get_pid() ((_pid < 2) -> 0 : 1)
63 #define get_pid() (_pid)
72 #define get_readerid() (get_pid())
75 * Each process has its own data in cache. Caches are randomly updated.
76 * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
80 typedef per_proc_byte {
84 /* Bitfield has a maximum of 8 procs */
85 typedef per_proc_bit {
89 #define DECLARE_CACHED_VAR(type, x) \
91 per_proc_##type cached_##x; \
92 per_proc_bit cache_dirty_##x;
94 #define INIT_CACHED_VAR(x, v, j) \
96 cache_dirty_##x.bitfield = 0; \
100 cached_##x.val[j] = v; \
102 :: j >= NR_PROCS -> break \
/* Nonzero when process `id` holds a locally modified, not-yet-flushed copy
 * of variable x (per-proc dirty bit in the bitfield). */
105 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x.bitfield & (1 << id))
/* Read the calling process's cached copy of x — local cache only, never
 * the shared memory copy. */
107 #define READ_CACHED_VAR(x) (cached_##x.val[get_pid()])
109 #define WRITE_CACHED_VAR(x, v) \
111 cached_##x.val[get_pid()] = v; \
112 cache_dirty_##x.bitfield = \
113 cache_dirty_##x.bitfield | (1 << get_pid()); \
116 #define CACHE_WRITE_TO_MEM(x, id) \
118 :: IS_CACHE_DIRTY(x, id) -> \
119 mem_##x = cached_##x.val[id]; \
120 cache_dirty_##x.bitfield = \
121 cache_dirty_##x.bitfield & (~(1 << id)); \
126 #define CACHE_READ_FROM_MEM(x, id) \
128 :: !IS_CACHE_DIRTY(x, id) -> \
129 cached_##x.val[id] = mem_##x;\
135 * Nondeterministically may flush a dirty cache to memory, or may leave it unchanged.
137 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
139 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
143 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
145 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
150 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
151 * reader threads to promote their compiler barrier to a smp_mb().
153 #ifdef REMOTE_BARRIERS
155 inline smp_rmb_pid(i, j)
158 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
162 CACHE_READ_FROM_MEM(urcu_active_readers[j], i);
164 :: j >= NR_READERS -> break
166 CACHE_READ_FROM_MEM(generation_ptr, i);
170 inline smp_wmb_pid(i, j)
173 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
177 CACHE_WRITE_TO_MEM(urcu_active_readers[j], i);
179 :: j >= NR_READERS -> break
181 CACHE_WRITE_TO_MEM(generation_ptr, i);
185 inline smp_mb_pid(i, j)
203 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
204 * signal or IPI to have all readers execute a smp_mb.
205 * We are not modeling the whole rendez-vous between readers and writers here,
206 * we just let the writer update each reader's caches remotely.
208 inline smp_mb_writer(i, j)
210 smp_mb_pid(get_pid(), j);
216 :: i >= NR_READERS -> break
218 smp_mb_pid(get_pid(), j);
221 inline smp_mb_reader(i, j)
231 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
235 CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
237 :: i >= NR_READERS -> break
239 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
246 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
250 CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
252 :: i >= NR_READERS -> break
254 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
275 inline smp_mb_writer(i, j)
280 inline smp_mb_reader(i, j)
287 /* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
288 DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
289 /* Note ! currently only two readers */
290 DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
291 /* pointer generation */
292 DECLARE_CACHED_VAR(byte, generation_ptr);
/* Generation number of the most recently freed element (set by the
 * writer's free-up step). */
294 byte last_free_gen = 0;
/* Per-reader snapshot of generation_ptr taken inside the read-side
 * critical section. */
296 byte read_generation[NR_READERS];
/* Per-reader flag, set while the reader is accessing the data
 * (bracketed set/clear in urcu_one_read / urcu_one_read_sig). */
297 bit data_access[NR_READERS];
/* Set when the reader-side signal handler should execute
 * (see wait_for_sighand_exec / dispatch_sighand_*_exec). */
303 bit sighand_exec = 0;
305 inline wait_init_done()
308 :: init_done == 0 -> skip;
315 inline wait_for_sighand_exec()
319 :: sighand_exec == 0 -> skip;
324 #ifdef TOO_BIG_STATE_SPACE
325 inline wait_for_sighand_exec()
329 :: sighand_exec == 0 -> skip;
333 :: 1 -> sighand_exec = 0;
342 inline wait_for_sighand_exec()
349 #ifdef TEST_SIGNAL_ON_WRITE
350 /* Block on signal handler execution */
351 inline dispatch_sighand_write_exec()
355 :: sighand_exec == 1 ->
364 inline dispatch_sighand_write_exec()
371 #ifdef TEST_SIGNAL_ON_READ
372 /* Block on signal handler execution */
373 inline dispatch_sighand_read_exec()
377 :: sighand_exec == 1 ->
386 inline dispatch_sighand_read_exec()
397 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
401 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
404 :: i >= NR_READERS -> break
406 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
407 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
411 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
414 :: i >= NR_READERS -> break
416 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
420 inline wait_for_reader(tmp, tmp2, i, j)
424 tmp2 = READ_CACHED_VAR(urcu_active_readers[tmp]);
426 dispatch_sighand_write_exec();
428 :: (tmp2 & RCU_GP_CTR_NEST_MASK)
429 && ((tmp2 ^ READ_CACHED_VAR(urcu_gp_ctr))
431 #ifndef GEN_ERROR_WRITER_PROGRESS
436 dispatch_sighand_write_exec();
443 inline wait_for_quiescent_state(tmp, tmp2, i, j)
447 :: tmp < NR_READERS ->
448 wait_for_reader(tmp, tmp2, i, j);
450 :: (NR_READERS > 1) && (tmp < NR_READERS - 1)
452 dispatch_sighand_write_exec();
457 :: tmp >= NR_READERS -> break
461 /* Model the RCU read-side critical section. */
463 #ifndef TEST_SIGNAL_ON_WRITE
465 inline urcu_one_read(i, j, nest_i, tmp, tmp2)
469 :: nest_i < READER_NEST_LEVEL ->
471 dispatch_sighand_read_exec();
472 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
474 dispatch_sighand_read_exec();
476 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
478 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
480 dispatch_sighand_read_exec();
481 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
484 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
488 dispatch_sighand_read_exec();
490 :: nest_i >= READER_NEST_LEVEL -> break;
493 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
494 data_access[get_readerid()] = 1;
495 data_access[get_readerid()] = 0;
499 :: nest_i < READER_NEST_LEVEL ->
501 dispatch_sighand_read_exec();
502 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
504 dispatch_sighand_read_exec();
505 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
507 :: nest_i >= READER_NEST_LEVEL -> break;
510 //dispatch_sighand_read_exec();
511 //smp_mc(i); /* added */
514 active proctype urcu_reader()
521 assert(get_pid() < NR_PROCS);
527 * We do not test reader's progress here, because we are mainly
528 * interested in writer's progress. The reader never blocks
529 * anyway. We have to test for reader/writer's progress
530 * separately, otherwise we could think the writer is doing
531 * progress when it's blocked by an always progressing reader.
533 #ifdef READER_PROGRESS
536 urcu_one_read(i, j, nest_i, tmp, tmp2);
540 #endif //!TEST_SIGNAL_ON_WRITE
543 /* signal handler reader */
545 inline urcu_one_read_sig(i, j, nest_i, tmp, tmp2)
549 :: nest_i < READER_NEST_LEVEL ->
551 tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
554 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
556 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
558 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
561 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],
566 :: nest_i >= READER_NEST_LEVEL -> break;
569 read_generation[get_readerid()] = READ_CACHED_VAR(generation_ptr);
570 data_access[get_readerid()] = 1;
571 data_access[get_readerid()] = 0;
575 :: nest_i < READER_NEST_LEVEL ->
577 tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
579 WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);
581 :: nest_i >= READER_NEST_LEVEL -> break;
584 //smp_mc(i); /* added */
587 active proctype urcu_reader_sig()
594 assert(get_pid() < NR_PROCS);
599 wait_for_sighand_exec();
601 * We do not test reader's progress here, because we are mainly
602 * interested in writer's progress. The reader never blocks
603 * anyway. We have to test for reader/writer's progress
604 * separately, otherwise we could think the writer is doing
605 * progress when it's blocked by an always progressing reader.
607 #ifdef READER_PROGRESS
610 urcu_one_read_sig(i, j, nest_i, tmp, tmp2);
616 /* Model the RCU update process. */
618 active proctype urcu_writer()
626 assert(get_pid() < NR_PROCS);
629 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
630 #ifdef WRITER_PROGRESS
634 dispatch_sighand_write_exec();
636 old_gen = READ_CACHED_VAR(generation_ptr);
637 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
640 dispatch_sighand_write_exec();
646 :: write_lock == 0 ->
655 dispatch_sighand_write_exec();
656 tmp = READ_CACHED_VAR(urcu_gp_ctr);
658 dispatch_sighand_write_exec();
659 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
661 dispatch_sighand_write_exec();
663 wait_for_quiescent_state(tmp, tmp2, i, j);
667 dispatch_sighand_write_exec();
668 tmp = READ_CACHED_VAR(urcu_gp_ctr);
670 dispatch_sighand_write_exec();
671 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
674 dispatch_sighand_write_exec();
675 wait_for_quiescent_state(tmp, tmp2, i, j);
678 dispatch_sighand_write_exec();
680 /* free-up step, e.g., kfree(). */
682 last_free_gen = old_gen;
688 * Given the reader loops infinitely, let the writer also busy-loop
689 * with progress here so, with weak fairness, we can test the
695 #ifdef WRITER_PROGRESS
698 dispatch_sighand_write_exec();
702 /* Leave after the readers and writers so the pid count is ok. */
707 INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
708 INIT_CACHED_VAR(generation_ptr, 0, j);
713 INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
714 read_generation[i] = 1;
717 :: i >= NR_READERS -> break