#define WRITER_PROGRESS
#define GEN_ERROR_WRITER_PROGRESS
#define RCU_GP_CTR_BIT (1 << 7)
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
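
/*
 * Counter layout (explanatory note, inferred from the masks above): the
 * low 7 bits of a reader's counter hold its read-side nesting count and
 * RCU_GP_CTR_BIT holds the grace-period phase. For example, a reader
 * nested twice while the phase bit is set stores (1 << 7) + 2 = 130.
 */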

#define read_free_race (read_generation == last_free_gen)
#define read_free (free_done && data_access)
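
/*
 * Explanatory note: these two predicates presumably feed a verification
 * claim defined outside this file. A reader whose observed generation
 * equals the last freed generation while it is still accessing data
 * would indicate a read/free race.
 */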

#ifndef READER_NEST_LEVEL
#define READER_NEST_LEVEL 2
#endif

#define REMOTE_BARRIERS
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */

/* Promela validation variables. */

#define NR_READERS 1
#define NR_WRITERS 1

#define NR_PROCS 2

#define get_pid() (_pid)

/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (respectively write and read);
 * smp_mb forces both.
 */

#define DECLARE_CACHED_VAR(type, x, v) \
	type mem_##x = v; \
	type cached_##x[NR_PROCS] = v; \
	bit cache_dirty_##x[NR_PROCS] = 0

#define IS_CACHE_DIRTY(x, id) (cache_dirty_##x[id])

#define READ_CACHED_VAR(x) (cached_##x[get_pid()])

#define WRITE_CACHED_VAR(x, v) \
	atomic { \
		cached_##x[get_pid()] = v; \
		cache_dirty_##x[get_pid()] = 1; \
	}

#define CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: IS_CACHE_DIRTY(x, id) -> \
		mem_##x = cached_##x[id]; \
		cache_dirty_##x[id] = 0; \
	:: else -> \
		skip \
	fi;

#define CACHE_READ_FROM_MEM(x, id) \
	if \
	:: !IS_CACHE_DIRTY(x, id) -> \
		cached_##x[id] = mem_##x; \
	:: else -> \
		skip \
	fi;

/*
 * Nondeterministically update memory from the cache (if dirty) or do
 * nothing, modeling random cache update timing.
 */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id) \
	if \
	:: 1 -> CACHE_WRITE_TO_MEM(x, id); \
	:: 1 -> skip \
	fi;

#define RANDOM_CACHE_READ_FROM_MEM(x, id) \
	if \
	:: 1 -> CACHE_READ_FROM_MEM(x, id); \
	:: 1 -> skip \
	fi;
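
/*
 * Usage sketch (explanatory, hypothetical pids p and q): after
 * WRITE_CACHED_VAR(x, 1) by process p, only cached_x[p] is updated and
 * marked dirty; process q keeps reading its stale cached_x[q] until
 * CACHE_WRITE_TO_MEM(x, p) and then CACHE_READ_FROM_MEM(x, q) propagate
 * the new value through mem_x. The RANDOM_* variants let the verifier
 * explore every possible propagation order.
 */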

/*
 * The remote-barriers mode tests the scheme where a signal (or IPI) is
 * sent to all reader threads to promote their compiler barrier() to a
 * smp_mb().
 */
#ifdef REMOTE_BARRIERS

inline smp_rmb_pid(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, i);
		CACHE_READ_FROM_MEM(urcu_active_readers_one, i);
		CACHE_READ_FROM_MEM(generation_ptr, i);
	}
}

inline smp_wmb_pid(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, i);
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, i);
		CACHE_WRITE_TO_MEM(generation_ptr, i);
	}
}

inline smp_mb_pid(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb_pid(i);
#endif
#ifndef NO_RMB
		smp_rmb_pid(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

/*
 * Readers do a simple barrier(); writers do a smp_mb() _and_ send a
 * signal or IPI so that all readers execute a smp_mb(). We do not model
 * the whole rendez-vous between readers and writers here; we simply let
 * the writer update each reader's cache remotely.
 */
inline smp_mb(i)
{
	if
	:: get_pid() >= NR_READERS ->
		smp_mb_pid(get_pid());
		i = 0;
		do
		:: i < NR_READERS ->
			smp_mb_pid(i);
			i++;
		:: i >= NR_READERS -> break
		od;
		smp_mb_pid(get_pid());
	:: else -> skip;
	fi;
}
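
/*
 * Worked trace (explanatory note, assuming Spin's declaration-order pid
 * assignment): with NR_READERS == 1 and NR_WRITERS == 1, the writer runs
 * as pid 1, so its smp_mb(i) executes smp_mb_pid(1), then smp_mb_pid(0)
 * for the single reader, then smp_mb_pid(1) again. A reader calling
 * smp_mb(i) takes the else branch and does nothing, matching the plain
 * barrier() of the real scheme.
 */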

#else

inline smp_rmb(i)
{
	atomic {
		CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

inline smp_wmb(i)
{
	atomic {
		CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
	}
}

inline smp_mb(i)
{
	atomic {
#ifndef NO_WMB
		smp_wmb(i);
#endif
#ifndef NO_RMB
		smp_rmb(i);
#endif
#ifdef NO_WMB
#ifdef NO_RMB
		ooo_mem(i);
#endif
#endif
	}
}

#endif

/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem. */
DECLARE_CACHED_VAR(byte, urcu_gp_ctr, 1);
/* Note: currently only one reader. */
DECLARE_CACHED_VAR(byte, urcu_active_readers_one, 0);
/* Pointer generation. */
DECLARE_CACHED_VAR(byte, generation_ptr, 0);

byte last_free_gen = 0;
bit free_done = 0;
byte read_generation = 1;
bit data_access = 0;

bit write_lock = 0;

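/*
 * Descriptive note: ooo_mem models the weakly-ordered memory system. At
 * any call site, each cached variable may or may not be flushed to
 * memory and may or may not be refreshed from it, so the verifier
 * explores every load/store reordering the cache model allows.
 */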
inline ooo_mem(i)
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers_one, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(generation_ptr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers_one, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(generation_ptr, get_pid());
	}
}

#define get_readerid() (get_pid())
#define get_writerid() (get_readerid() + NR_READERS)
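
/*
 * Pid layout note (follows from Spin instantiating active proctypes in
 * declaration order): readers occupy pids 0 .. NR_READERS - 1 and
 * writers occupy pids NR_READERS .. NR_PROCS - 1, which is what the
 * get_pid() >= NR_READERS test in smp_mb() relies on.
 */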

inline wait_for_reader(tmp, id, i)
{
	do
	:: 1 ->
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (tmp & RCU_GP_CTR_NEST_MASK)
		   && ((tmp ^ READ_CACHED_VAR(urcu_gp_ctr))
		       & RCU_GP_CTR_BIT) ->
#ifndef GEN_ERROR_WRITER_PROGRESS
			smp_mb(i);
#else
			ooo_mem(i);
#endif
		:: else ->
			break;
		fi;
	od;
}
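
/*
 * Worked example (explanatory note): a reader is still "ongoing" when
 * its counter has a nonzero nesting count _and_ its phase bit differs
 * from the current urcu_gp_ctr. E.g., a reader that entered while
 * urcu_gp_ctr == 1 holds tmp == 1; after the writer flips the phase,
 * urcu_gp_ctr == 129, so (tmp & 127) == 1 and ((tmp ^ 129) & 128) == 128,
 * and the writer keeps waiting. A reader with tmp == 0 never blocks the
 * writer.
 */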

inline wait_for_quiescent_state(tmp, i, j)
{
	i = 0;
	do
	:: i < NR_READERS ->
		wait_for_reader(tmp, i, j);
		if
		:: (NR_READERS > 1) && (i < NR_READERS - 1) ->
			ooo_mem(j);
		:: else ->
			skip;
		fi;
		i++
	:: i >= NR_READERS -> break
	od;
}

/* Model the RCU read-side critical section. */

inline urcu_one_read(i, nest_i, tmp, tmp2)
{
	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		if
		:: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->
			tmp2 = READ_CACHED_VAR(urcu_gp_ctr);
			ooo_mem(i);
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp2);
		:: else ->
			WRITE_CACHED_VAR(urcu_active_readers_one, tmp + 1);
		fi;
		smp_mb(i);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;

	ooo_mem(i);
	read_generation = READ_CACHED_VAR(generation_ptr);
	ooo_mem(i);
	data_access = 1;
	ooo_mem(i);
	data_access = 0;

	nest_i = 0;
	do
	:: nest_i < READER_NEST_LEVEL ->
		smp_mb(i);
		tmp2 = READ_CACHED_VAR(urcu_active_readers_one);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_active_readers_one, tmp2 - 1);
		nest_i++;
	:: nest_i >= READER_NEST_LEVEL -> break;
	od;
	ooo_mem(i);
	//smp_mc(i); /* added */
}
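
/*
 * Mapping note (explanatory, assuming the usual urcu C API): the first
 * loop above models READER_NEST_LEVEL nested rcu_read_lock() calls (the
 * outermost snapshots urcu_gp_ctr, nested ones increment the nesting
 * count), the middle section models dereferencing the pointer and
 * accessing the data, and the second loop models the matching
 * rcu_read_unlock() calls.
 */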

active [NR_READERS] proctype urcu_reader()
{
	byte i, nest_i;
	byte tmp, tmp2;

	assert(get_pid() < NR_PROCS);

end_reader:
	do
	:: 1 ->
		/*
		 * We do not test the reader's progress here, because we are
		 * mainly interested in the writer's progress. The reader never
		 * blocks anyway. Reader and writer progress have to be tested
		 * separately; otherwise we could think the writer is making
		 * progress when it is in fact blocked by an always-progressing
		 * reader.
		 */
#ifdef READER_PROGRESS
progress_reader:
#endif
		urcu_one_read(i, nest_i, tmp, tmp2);
	od;
}

/* Model the RCU update process. */

active [NR_WRITERS] proctype urcu_writer()
{
	byte i, j;
	byte tmp;
	byte old_gen;

	assert(get_pid() < NR_PROCS);

	do
	:: (READ_CACHED_VAR(generation_ptr) < 5) ->
#ifdef WRITER_PROGRESS
progress_writer1:
#endif
		ooo_mem(i);
		atomic {
			old_gen = READ_CACHED_VAR(generation_ptr);
			WRITE_CACHED_VAR(generation_ptr, old_gen + 1);
		}
		ooo_mem(i);

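		/*
		 * Descriptive note: acquire the writer mutex, modeled as a
		 * busy-wait on an atomic test-and-set of write_lock.
		 */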
		do
		:: 1 ->
			atomic {
				if
				:: write_lock == 0 ->
					write_lock = 1;
					break;
				:: else ->
					skip;
				fi;
			}
		od;
		smp_mb(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		ooo_mem(i);
		//smp_mc(i);
		wait_for_quiescent_state(tmp, i, j);
		//smp_mc(i);
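		/*
		 * Explanatory note: unless SINGLE_FLIP is defined, the writer
		 * flips the phase bit and waits for quiescence a second time.
		 * The double flip is the usual urcu technique to cope with a
		 * reader snapshotting urcu_gp_ctr concurrently with the first
		 * flip; SINGLE_FLIP is presumably there to let the model
		 * exhibit the failure of the single-flip variant.
		 */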
#ifndef SINGLE_FLIP
		ooo_mem(i);
		tmp = READ_CACHED_VAR(urcu_gp_ctr);
		ooo_mem(i);
		WRITE_CACHED_VAR(urcu_gp_ctr, tmp ^ RCU_GP_CTR_BIT);
		//smp_mc(i);
		ooo_mem(i);
		wait_for_quiescent_state(tmp, i, j);
#endif
		smp_mb(i);
		write_lock = 0;
		/* Free-up step, e.g., kfree(). */
		atomic {
			last_free_gen = old_gen;
			free_done = 1;
		}
	:: else -> break;
	od;
	/*
	 * Given that the reader loops infinitely, let the writer also
	 * busy-loop with a progress label here so that, under weak fairness,
	 * we can test the writer's progress.
	 */
end_writer:
	do
	:: 1 ->
#ifdef WRITER_PROGRESS
progress_writer2:
#endif
		skip;
	od;
}