2 * mem.spin: Promela code to validate memory barriers with OOO memory.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright (c) 2009 Mathieu Desnoyers
21 /* Promela validation variables. */
26 /* Number of reader and writer processes */
29 /* Includes reader, writer and init process */
/* Total process bound (readers + writer + init, per the note above);
 * used in assertions such as assert(get_pid() < NR_PROCS). */
30 #define MAX_NR_PROCS 5
/* Use Spin's predefined _pid as this process's identifier, so each
 * process indexes its own slot in the per-process cache arrays. */
32 #define get_pid() (_pid)
35 * Each process has its own data in cache. Caches are randomly updated.
36 * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
/*
 * Declare variable x: a per-process cached copy (type 'type', initial
 * value v) plus a per-process dirty bit tracking writes not yet flushed
 * to memory.
 * NOTE(review): the backing mem_##x declaration referenced by
 * CACHE_WRITE_TO_MEM/CACHE_READ_FROM_MEM below is not visible in this
 * chunk — presumably emitted by the elided continuation line; confirm.
 */
40 #define DECLARE_CACHED_VAR(type, x, v) \
42 type cached_##x[NR_PROCS] = v; \
43 bit cache_dirty_##x[NR_PROCS] = 0
/* True when process 'id' holds a write to x not yet flushed to memory. */
45 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])
/* Read the calling process's cached copy of x (never touches memory). */
47 #define READ_CACHED_VAR(x) (cached_##x[get_pid()])
/* Write v into the calling process's cache entry for x and mark it
 * dirty; the value reaches memory only via CACHE_WRITE_TO_MEM. */
49 #define WRITE_CACHED_VAR(x, v) \
51 cached_##x[get_pid()] = v; \
52 cache_dirty_##x[get_pid()] = 1; \
/*
 * CACHE_WRITE_TO_MEM: if process id's cache entry for x is dirty, flush
 * it to the backing mem_##x and clear the dirty bit.
 * CACHE_READ_FROM_MEM: if the entry is clean, refresh it from mem_##x
 * (a dirty entry is never overwritten, so local writes are preserved).
 * No comments can be placed between the lines below: they are
 * backslash-continued cpp macro bodies.
 */
55 #define CACHE_WRITE_TO_MEM(x, id) \
57 :: IS_CACHE_DIRTY(x, id) -> \
58 mem_##x = cached_##x[id]; \
59 cache_dirty_##x[id] = 0; \
64 #define CACHE_READ_FROM_MEM(x, id) \
66 :: !IS_CACHE_DIRTY(x, id) -> \
67 cached_##x[id] = mem_##x;\
73 * Nondeterministically flush or fetch a cache entry — or do nothing —
73 * modeling out-of-order cache/memory traffic happening at random times.
75 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
77 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
81 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
83 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
88 * Remote barriers test the scheme where a signal (or IPI) is sent to all
89 * reader threads to promote their compiler barrier to a smp_mb().
91 #ifdef REMOTE_BARRIERS
/* Read-barrier body on behalf of process i: refresh i's clean cache
 * entries from memory for every modeled variable.
 * NOTE(review): the enclosing inline's header (presumably smp_rmb_pid)
 * is elided from this chunk — confirm against the full file. */
96 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
97 CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
98 CACHE_READ_FROM_MEM(generation_ptr, i);
/* Write barrier on behalf of process i: flush i's dirty cache entries
 * of every modeled variable out to memory. Keep the variable list in
 * sync with the read-barrier body above. */
102 inline smp_wmb_pid(i)
105 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
106 CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
107 CACHE_WRITE_TO_MEM(generation_ptr, i);
125 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
126 * signal or IPI to have all readers execute a smp_mb.
127 * We are not modeling the whole rendez-vous between readers and writers here,
128 * we just let the writer update each reader's caches remotely.
/* Writer branch: a pid at or beyond NR_READERS performs its own full
 * barrier, then (per the elided loop below) a full barrier on behalf of
 * each reader, emulating the remote IPI promotion described above. */
133 :: get_pid() >= NR_READERS ->
134 smp_mb_pid(get_pid());
140 :: i >= NR_READERS -> break
142 smp_mb_pid(get_pid());
/* Non-remote smp_rmb body: refresh this process's clean cache entries
 * from memory for all modeled variables. */
152 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
153 CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
154 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Non-remote smp_wmb body: flush this process's dirty cache entries to
 * memory for all modeled variables. */
161 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
162 CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
163 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
182 /* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
183 DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
184 /* Note ! currently only one reader */
185 DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
186 /* pointer generation */
187 DECLARE_CACHED_VAR(byte, generation_ptr, 0);
/* Plain (uncached) model bookkeeping: generation last freed by the
 * writer, and the generation observed by the reader's critical section. */
189 byte last_free_gen = 0;
191 byte read_generation = 1;
/* ooo_mem body (inline header elided): nondeterministically flush and
 * refresh every modeled variable for the calling process, simulating
 * out-of-order memory between explicit barriers. */
199 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
200 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
202 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
203 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
204 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
206 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* The writer process uses its own _pid as its identifier. */
210 #define get_writerid() (get_pid())
/*
 * Fix: the original expanded to undefined get_writerrid() (typo, double
 * 'r'), which would fail at cpp/spin time on first use. Reader ids are
 * derived from the writer's id offset by NR_READERS, exactly as the
 * (corrected) macro name intends.
 */
211 #define get_readerid() (get_writerid() + NR_READERS)
/* Spin until reader 'id' passes through a quiescent state, as observed
 * through this process's cache of urcu_active_readers_one. */
213 inline wait_for_reader(tmp, id, i)
/* Snapshot the reader's nesting/phase counter. */
218 tmp = READ_CACHED_VAR(urcu_active_readers_one);
/* Busy branch: reader is inside a critical section (nest mask set) and
 * still in the previous grace-period phase (gp_ctr bits differ). */
221 :: (tmp & RCU_GP_CTR_NEST_MASK)
222 && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
224 #ifndef GEN_ERROR_WRITER_PROGRESS
/* Wait for every reader (loop over i up to NR_READERS, elided here) to
 * reach a quiescent state for the current grace-period phase. */
235 inline wait_for_quiescent_state(tmp, i, j)
240 wait_for_reader(tmp, i, j);
242 :: i >= NR_READERS -> break
246 /* Model the RCU read-side critical section. */
248 inline urcu_one_read(i, nest_i, tmp, tmp2, nest)
/* rcu_read_lock: on outermost entry (nest mask clear), copy the global
 * grace-period counter into this reader's active-readers word; nested
 * entries (elided branch) only bump the nesting count. */
254 tmp = READ_CACHED_VAR(urcu_active_readers_one);
257 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
259 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
261 WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
263 WRITE_CACHED_VAR(urcu_active_readers_one,
269 :: nest_i >= nest -> break;
/* Critical section body: record the generation the reader observes so
 * properties elsewhere can compare it against last_free_gen. */
273 read_generation = READ_CACHED_VAR(generation_ptr);
/* rcu_read_unlock: decrement the nesting count. */
285 tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
287 WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
289 :: nest_i >= nest -> break;
292 //smp_mc(i); /* added */
/* Reader process: repeatedly runs the modeled read-side critical
 * section with nesting depth 2.
 * NOTE(review): two active instances are declared here, yet the model
 * declares a single urcu_active_readers_one word ("currently only one
 * reader" above) — confirm the instance count is intentional. */
295 active [2] proctype urcu_reader()
300 assert(get_pid() < NR_PROCS);
306 * We do not test reader's progress here, because we are mainly
307 * interested in writer's progress. The reader never blocks
308 * anyway. We have to test for reader/writer's progress
309 * separately, otherwise we could think the writer is doing
310 * progress when it's blocked by an always progressing reader.
312 #ifdef READER_PROGRESS
315 urcu_one_read(i, nest_i, tmp, tmp2, 2);
319 /* Model the RCU update process. */
321 active [1] proctype urcu_writer()
327 assert(get_pid() < NR_PROCS);
/* Loop while fewer than 5 generations have been published. */
330 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
331 #ifdef WRITER_PROGRESS
/* Publish a new generation (models rcu_assign_pointer of a new node). */
336 old_gen = READ_CACHED_VAR(generation_ptr);
337 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
345 :: write_lock == 0 ->
/* synchronize_rcu: flip the grace-period phase bit and wait for all
 * readers to pass a quiescent state — done twice, covering readers that
 * straddle either phase of the grace period. */
355 tmp = READ_CACHED_VAR(urcu_gp_ctr);
357 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
360 wait_for_quiescent_state(tmp, i, j);
364 tmp = READ_CACHED_VAR(urcu_gp_ctr);
366 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
369 wait_for_quiescent_state(tmp, i, j);
375 /* free-up step, e.g., kfree(). */
/* Record the generation reclaimed; properties can check no reader still
 * holds read_generation == last_free_gen. */
377 last_free_gen = old_gen;
383 * Given the reader loops infinitely, let the writer also busy-loop
384 * with progress here so, with weak fairness, we can test the
390 #ifdef WRITER_PROGRESS