/* NOTE(review): this file is a line-numbered, partially elided listing of a
 * Promela (SPIN) model of userspace RCU; the leading integers on each line are
 * stale source line numbers and gaps in them (e.g. 4 -> 6) mark elided lines.
 * The listing is not directly compilable as-is. */
/* Model configuration switches and grace-period counter bit layout. */
/* Presumably enables writer progress (non-progress cycle) checking -- see the
 * #ifdef WRITER_PROGRESS uses in urcu_writer below; TODO confirm against the
 * full model. */
1 #define WRITER_PROGRESS
/* Presumably selects the variant known to exhibit a writer-progress error;
 * guarded by #ifndef GEN_ERROR_WRITER_PROGRESS in wait_for_reader below. */
2 #define GEN_ERROR_WRITER_PROGRESS
/* Grace-period phase bit: bit 7 of the global counter is toggled by the
 * writer each grace-period phase. */
3 #define RCU_GP_CTR_BIT (1 << 7)
/* Low 7 bits of a reader's counter snapshot hold its read-side nesting
 * count (see the tmp & RCU_GP_CTR_NEST_MASK tests below). */
4 #define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
/* Safety property: a race is a read of the same generation the writer last
 * freed. read_free references free_done/data_access, which are not visible in
 * this elided listing -- presumably declared in the elided portion. */
6 #define read_free_race (read_generation == last_free_gen)
7 #define read_free (free_done && data_access)
/* Maximum modeled rcu_read_lock() nesting depth; default 2 unless set on the
 * preprocessor command line. */
9 #ifndef READER_NEST_LEVEL
10 #define READER_NEST_LEVEL 2
/* Selects the signal/IPI-based barrier-promotion scheme (see the
 * REMOTE_BARRIERS section below). */
13 #define REMOTE_BARRIERS
15 * mem.spin: Promela code to validate memory barriers with OOO memory.
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
31 * Copyright (c) 2009 Mathieu Desnoyers
34 /* Promela validation variables. */
/* _pid is SPIN's builtin per-process identifier; used throughout to index
 * the per-process cache arrays. */
41 #define get_pid() (_pid)
/* Weak-memory model: every variable X is modeled as a global mem_X plus a
 * per-process cached copy and dirty bit. Writes go to the local cache and
 * set the dirty bit; barriers and random events propagate to/from mem_X.
 * NOTE(review): several macro-continuation lines (and the if/fi wrappers)
 * are elided in this listing. */
44 * Each process have its own data in cache. Caches are randomly updated.
45 * smp_wmb and smp_rmb forces cache updates (write and read), wmb_mb forces
/* Declares mem_X (elided line 50, presumably "type mem_##x = v;"), the
 * per-process cached copies, and the per-process dirty bits -- TODO confirm
 * the elided line against the full model. */
49 #define DECLARE_CACHED_VAR(type, x, v) \
51 type cached_##x[NR_PROCS] = v; \
52 bit cache_dirty_##x[NR_PROCS] = 0
/* True when process `id` has a write to X not yet flushed to mem_X. */
54 #define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])
/* Reads go through the calling process's own cached copy only. */
56 #define READ_CACHED_VAR(x) (cached_##x[get_pid()])
/* Writes update the local cache and mark it dirty; flushing to mem_X happens
 * later via CACHE_WRITE_TO_MEM. */
58 #define WRITE_CACHED_VAR(x, v) \
60 cached_##x[get_pid()] = v; \
61 cache_dirty_##x[get_pid()] = 1; \
/* Flush: if process id's cache of X is dirty, publish it to mem_X and clear
 * the dirty bit (the enclosing if/fi lines are elided). */
64 #define CACHE_WRITE_TO_MEM(x, id) \
66 :: IS_CACHE_DIRTY(x, id) -> \
67 mem_##x = cached_##x[id]; \
68 cache_dirty_##x[id] = 0; \
/* Refresh: only a clean cache may be overwritten from mem_X, so a pending
 * local write is never lost. */
73 #define CACHE_READ_FROM_MEM(x, id) \
75 :: !IS_CACHE_DIRTY(x, id) -> \
76 cached_##x[id] = mem_##x;\
/* Nondeterministic flush/refresh: the ":: 1 ->" guard lets SPIN choose to
 * propagate or not (the alternative ":: 1 -> skip" arm is elided), which is
 * what models out-of-order memory. */
82 * May update other caches if cache is dirty, or not.
84 #define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
86 :: 1 -> CACHE_WRITE_TO_MEM(x, id); \
90 #define RANDOM_CACHE_READ_FROM_MEM(x, id)\
92 :: 1 -> CACHE_READ_FROM_MEM(x, id); \
/* Barrier primitives. Under REMOTE_BARRIERS the writer can execute a full
 * barrier on behalf of a reader (modeling the signal/IPI-based scheme where
 * readers only need a compiler barrier). NOTE(review): the inline bodies'
 * braces and the smp_mb_pid/smp_mb definitions are partially elided here. */
97 * Remote barriers tests the scheme where a signal (or IPI) is sent to all
98 * reader threads to promote their compiler barrier to a smp_mb().
100 #ifdef REMOTE_BARRIERS
/* Read barrier for an arbitrary process i: refresh i's cache of every
 * modeled variable from memory. Must be kept in sync with the variable
 * declarations below. */
102 inline smp_rmb_pid(i)
105 CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
106 CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
107 CACHE_READ_FROM_MEM(generation_ptr, i);
/* Write barrier for process i: flush i's dirty caches of every modeled
 * variable to memory. */
111 inline smp_wmb_pid(i)
114 CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
115 CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
116 CACHE_WRITE_TO_MEM(generation_ptr, i);
138 * Readers do a simple barrier(), writers are doing a smp_mb() _and_ sending a
139 * signal or IPI to have all readers execute a smp_mb.
140 * We are not modeling the whole rendez-vous between readers and writers here,
141 * we just let the writer update each reader's caches remotely.
/* Presumably inside smp_mb(i): a writer (pid >= NR_READERS) does its own
 * full barrier and then, in the elided loop below, executes a full barrier
 * for each reader pid as well -- TODO confirm against the full model. */
146 :: get_pid() >= NR_READERS ->
147 smp_mb_pid(get_pid());
153 :: i >= NR_READERS -> break
155 smp_mb_pid(get_pid());
/* Non-remote (#else, elided) variants: plain read barrier ... */
165 CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
166 CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
167 CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* ... and plain write barrier for the calling process only. */
174 CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
175 CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
176 CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
/* Modeled shared state. Any variable added here must also be added to the
 * barrier inlines above and to ooo_mem below. */
199 /* Keep in sync manually with smp_rmb, wmp_wmb and ooo_mem */
/* Global grace-period counter; starts at 1 (nesting field, phase bit clear). */
200 DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
201 /* Note ! currently only one reader */
/* The single reader's per-CPU counter snapshot (nesting + phase bit). */
202 DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
203 /* pointer generation */
/* Stands in for the RCU-protected pointer: each update bumps the generation. */
204 DECLARE_CACHED_VAR(byte, generation_ptr, 0);
/* Generation of the data most recently freed by the writer. */
206 byte last_free_gen = 0;
/* Generation observed by the reader inside its critical section; compared
 * against last_free_gen by the read_free_race property. */
208 byte read_generation = 1;
/* Presumably the body of "inline ooo_mem(i)" (header elided): models
 * out-of-order memory by nondeterministically flushing and refreshing every
 * cached variable -- TODO confirm. Note lines 217 and 221 are truncated
 * mid-call; the ", get_pid());" continuations are elided. */
216 RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
217 RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one,
219 RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
220 RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
221 RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one,
223 RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
/* Process-id helpers: readers occupy pids [0, NR_READERS); writers follow. */
227 #define get_readerid() (get_pid())
228 #define get_writerid() (get_readerid() + NR_READERS)
/* Writer-side: spin until reader `id` is no longer in a critical section
 * that began before the current grace-period phase. A reader is "ongoing"
 * when its nesting count is nonzero AND its snapshot's phase bit differs
 * from the global counter -- line 238 is truncated; the "& RCU_GP_CTR_BIT)"
 * part of the condition is presumably elided. */
230 inline wait_for_reader(tmp, id, i)
234 tmp = READ_CACHED_VAR(urcu_active_readers_one);
237 :: (tmp & RCU_GP_CTR_NEST_MASK)
238 && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
/* The busy-wait arm differs depending on whether the known writer-progress
 * error is being reproduced (elided #else branch). */
240 #ifndef GEN_ERROR_WRITER_PROGRESS
/* Wait for every reader to pass through a quiescent state. The
 * (NR_READERS > 1) guard and the loop exit suggest iteration over reader
 * pids i in [0, NR_READERS); loop header/footer lines are elided. */
251 inline wait_for_quiescent_state(tmp, i, j)
256 wait_for_reader(tmp, i, j);
258 :: (NR_READERS > 1) && (i < NR_READERS - 1)
264 :: i >= NR_READERS -> break
268 /* Model the RCU read-side critical section. */
/* One full read-side pass: nest rcu_read_lock() READER_NEST_LEVEL deep,
 * observe the protected generation, then unlock back out. */
270 inline urcu_one_read(i, nest_i, tmp, tmp2)
/* Lock loop: one iteration per nesting level. */
274 :: nest_i < READER_NEST_LEVEL ->
276 tmp = READ_CACHED_VAR(urcu_active_readers_one);
/* Outermost lock (nesting field zero): copy the global counter, adopting
 * its current phase bit. */
279 :: (!(tmp & RCU_GP_CTR_NEST_MASK))
281 tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
283 WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
/* Nested lock (elided else arm): line 285 is truncated; presumably writes
 * tmp + 1 to bump only the nesting field -- TODO confirm. */
285 WRITE_CACHED_VAR(urcu_active_readers_one,
290 :: nest_i >= READER_NEST_LEVEL -> break;
/* The actual "data access": record which generation this reader saw, for
 * the read_free_race safety check. */
294 read_generation = READ_CACHED_VAR(generation_ptr);
/* Unlock loop: decrement the nesting field once per level. */
302 :: nest_i < READER_NEST_LEVEL ->
304 tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
306 WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
308 :: nest_i >= READER_NEST_LEVEL -> break;
311 //smp_mc(i); /* added */
/* Reader process: loops forever (loop structure elided) performing read-side
 * critical sections via urcu_one_read. */
314 active [NR_READERS] proctype urcu_reader()
/* Sanity: pid allocation must fit the per-process cache arrays. */
319 assert(get_pid() < NR_PROCS);
325 * We do not test reader's progress here, because we are mainly
326 * interested in writer's progress. The reader never blocks
327 * anyway. We have to test for reader/writer's progress
328 * separately, otherwise we could think the writer is doing
329 * progress when it's blocked by an always progressing reader.
/* Optional progress label for SPIN non-progress-cycle detection (label line
 * itself elided). */
331 #ifdef READER_PROGRESS
334 urcu_one_read(i, nest_i, tmp, tmp2);
338 /* Model the RCU update process. */
/* Writer process: publishes up to 5 new generations; for each, runs the
 * classic two-phase URCU grace period before freeing the old one. Many
 * interior lines (locking, barriers, fi/od closers) are elided. */
340 active [NR_WRITERS] proctype urcu_writer()
346 assert(get_pid() < NR_PROCS);
/* Bound the state space: stop after 5 generations. */
349 :: (READ_CACHED_VAR(generation_ptr) < 5) ->
350 #ifdef WRITER_PROGRESS
/* Publish: replace the protected "pointer" with a new generation. */
355 old_gen = READ_CACHED_VAR(generation_ptr);
356 WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
/* Mutual exclusion between concurrent writers (write_lock declaration and
 * acquisition body are elided). */
364 :: write_lock == 0 ->
/* Grace period, phase 1: flip the global phase bit, then wait until every
 * reader that started before the flip has exited its critical section. */
373 tmp = READ_CACHED_VAR(urcu_gp_ctr);
375 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
378 wait_for_quiescent_state(tmp, i, j);
/* Phase 2: flip the bit back and wait again, so readers straddling either
 * phase are accounted for. */
382 tmp = READ_CACHED_VAR(urcu_gp_ctr);
384 WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
387 wait_for_quiescent_state(tmp, i, j);
391 /* free-up step, e.g., kfree(). */
/* Record the freed generation; read_free_race flags any reader still seeing
 * it. */
393 last_free_gen = old_gen;
399 * Given the reader loops infinitely, let the writer also busy-loop
400 * with progress here so, with weak fairness, we can test the
406 #ifdef WRITER_PROGRESS