2 * mem.spin: Promela code to validate memory barriers with OOO memory.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (c) 2009 Mathieu Desnoyers
21 /* Promela validation variables. */
/* Identifier of the running Promela process; used to index the per-process cache arrays. */
#define get_pid() (_pid)
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read), smp_mb forces
/*
 * Declare variable "x" of "type" with initial value "v": one cached copy and
 * one dirty bit per process.  NOTE(review): the main-memory copy (mem_##x,
 * written by CACHE_WRITE_TO_MEM below) is declared on a line not visible in
 * this chunk.
 */
#define DECLARE_CACHED_VAR(type, x, v) \
	type cached_##x[NR_PROCS] = v; \
	bit cache_dirty_##x[NR_PROCS] = 0
/* True when process "id" holds a write to x not yet flushed to main memory. */
#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])
/* Read the current process's cached copy of x (never main memory directly). */
#define READ_CACHED_VAR(x) (cached_##x[get_pid()])
/*
 * Write v into the current process's cached copy of x and mark it dirty;
 * the value only reaches main memory when later flushed by
 * CACHE_WRITE_TO_MEM().  NOTE(review): the enclosing atomic{} wrapper lines
 * are not visible in this chunk.
 */
#define WRITE_CACHED_VAR(x, v) \
	cached_##x[get_pid()] = v; \
	cache_dirty_##x[get_pid()] = 1; \
/*
 * Flush process "id"'s cached copy of x to main memory, but only when the
 * cache line is dirty; the dirty bit is then cleared.  NOTE(review): the
 * enclosing if/fi selection (and its non-dirty branch) is not visible in
 * this chunk.
 */
#define CACHE_WRITE_TO_MEM(x, id) \
	:: IS_CACHE_DIRTY(x, id) -> \
		mem_##x = cached_##x[id]; \
		cache_dirty_##x[id] = 0; \
/*
 * Refresh process "id"'s cached copy of x from main memory, but only when
 * the cache line is clean -- a dirty (not yet flushed) local write must not
 * be overwritten.  NOTE(review): the enclosing if/fi selection is not
 * visible in this chunk.
 */
#define CACHE_READ_FROM_MEM(x, id) \
	:: !IS_CACHE_DIRTY(x, id) -> \
		cached_##x[id] = mem_##x;\
 * May or may not update other caches; an update can only occur while the cache is dirty.
/*
 * Non-deterministically flush (or don't flush) id's cached copy of x to
 * main memory.  NOTE(review): the surrounding if/fi and the "do nothing"
 * alternative are not visible in this chunk.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id); \
/*
 * Non-deterministically refresh (or don't refresh) id's cached copy of x
 * from main memory.  NOTE(review): the surrounding if/fi and the
 * "do nothing" alternative are not visible in this chunk.
 */
#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	:: 1 -> CACHE_READ_FROM_MEM(x, id); \
	/*
	 * NOTE(review): body fragment -- presumably the smp_rmb model (its
	 * inline header is not visible in this chunk).  Forces a cache
	 * refresh of every shared variable for the calling process.
	 */
	CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
	CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
	CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	/*
	 * NOTE(review): body fragment -- presumably the smp_wmb model (its
	 * inline header is not visible in this chunk).  Forces a flush of
	 * every dirty cached variable of the calling process to memory.
	 */
	CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
	CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
	CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);	/* global grace-period counter, starts at 1 */
/* Note ! currently only one reader */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);	/* written by the reader, polled by wait_for_reader() */
/* pointer generation */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;		/* generation last handed to the writer's free-up step */
byte read_generation = 1;	/* generation observed by the reader in its critical section */
	/*
	 * NOTE(review): body fragment -- presumably the ooo_mem model (its
	 * inline header is not visible in this chunk).  Models out-of-order
	 * memory by randomly flushing and randomly refreshing each cached
	 * variable; two call-continuation lines are cut off in this view.
	 */
	RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
	RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
	RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
	RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
	RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Readers use pids [0, NR_READERS); writer pids follow directly after. */
#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)
/*
 * Busy-wait until reader "id" is no longer inside a read-side critical
 * section that began before the current grace-period phase.  The visible
 * guard keeps waiting while the reader's nesting count is non-zero AND its
 * counter snapshot XORed with urcu_gp_ctr satisfies a condition that is cut
 * off in this chunk (NOTE(review): loop delimiters and the rest of the body
 * are not visible here).
 */
inline wait_for_reader(tmp, id, i)
	tmp = READ_CACHED_VAR(urcu_active_readers_one);
	:: (tmp & RCU_GP_CTR_NEST_MASK)
		&& ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
#ifndef GEN_ERROR_WRITER_PROGRESS
/*
 * Wait for every reader to pass through a quiescent state: calls
 * wait_for_reader() per reader and exits once i reaches NR_READERS.
 * NOTE(review): the do/od loop delimiters and the index increment are not
 * visible in this chunk.
 */
inline wait_for_quiescent_state(tmp, i, j)
	wait_for_reader(tmp, i, j);
	:: i >= NR_READERS -> break
/* Model the RCU read-side critical section. */
/*
 * NOTE(review): only a fragment of this proctype is visible in this chunk;
 * the end-of-line annotations below cover the visible lines only.
 */
active [NR_READERS] proctype urcu_reader()
	assert(get_pid() < NR_PROCS);	/* pid must fit the per-process cache arrays */
	 * We do not test reader's progress here, because we are mainly
	 * interested in writer's progress. The reader never blocks
	 * anyway. We have to test for reader/writer's progress
	 * separately, otherwise we could think the writer is doing
	 * progress when it's blocked by an always progressing reader.
#ifdef READER_PROGRESS
	:: nest_i < READER_NEST_LEVEL ->	/* enter one more nesting level (rcu_read_lock-like) */
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		:: (!(tmp & RCU_GP_CTR_NEST_MASK))	/* outermost level: no nesting bits set */
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);	/* snapshot the global counter */
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);	/* nested: just bump the count */
	:: nest_i >= READER_NEST_LEVEL -> break;
	read_generation = READ_CACHED_VAR(generation_ptr);	/* the read-side access under protection */
	:: nest_i < READER_NEST_LEVEL ->	/* unwind one nesting level (rcu_read_unlock-like) */
		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);	/* decrement the nesting count */
	:: nest_i >= READER_NEST_LEVEL -> break;
	//smp_mc(i); /* added */
245 /* Model the RCU update process. */
247 active [NR_WRITERS] proctype urcu_writer()
253 assert(get_pid() < NR_PROCS);
256 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
257 #ifdef WRITER_PROGRESS
262 old_gen = READ_CACHED_VAR(generation_ptr);
263 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
271 :: write_lock == 0 ->
281 tmp = READ_CACHED_VAR(urcu_gp_ctr);
283 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
286 wait_for_quiescent_state(tmp, i, j);
290 tmp = READ_CACHED_VAR(urcu_gp_ctr);
292 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
295 wait_for_quiescent_state(tmp, i, j);
301 /* free-up step, e.g., kfree(). */
303 last_free_gen = old_gen;
309 * Given the reader loops infinitely, let the writer also busy-loop
310 * with progress here so, with weak fairness, we can test the
316 #ifdef WRITER_PROGRESS