/* Extracted from commit bac061a1 (Mathieu Desnoyers) */
/*
 * mem.spin: Promela code to validate memory barriers with OOO memory.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Mathieu Desnoyers
 */
20 | ||
/* Promela validation variables. */

/* Number of modeled reader and writer processes. */
#define NR_READERS 1
#define NR_WRITERS 1

/* Total process count; must match the number of active proctypes below,
 * since it sizes every per-process cache array. */
#define NR_PROCS 2

/* Use the Promela built-in _pid to index per-process cache state. */
#define get_pid()	(_pid)
29 | ||
/*
 * Each process has its own data in cache. Caches are randomly updated.
 * smp_wmb and smp_rmb force cache updates (write and read); smp_mb forces
 * both.
 */

/*
 * Declares, for a variable x of the given type with initial value v:
 *  - mem_##x             : the backing "main memory" copy,
 *  - cached_##x[pid]     : each process' locally cached copy,
 *  - cache_dirty_##x[pid]: set when the cached copy awaits write-back.
 */
#define DECLARE_CACHED_VAR(type, x, v)	\
	type mem_##x = v;		\
	type cached_##x[NR_PROCS] = v;	\
	bit cache_dirty_##x[NR_PROCS] = 0
40 | ||
/* True when process `id` has a not-yet-written-back value of x. */
#define IS_CACHE_DIRTY(x, id)	(cache_dirty_##x[id])

/* Read x from the current process' cache (never from memory directly). */
#define READ_CACHED_VAR(x)	(cached_##x[get_pid()])

/* Write v into the current process' cache and mark it dirty; the value
 * only reaches mem_##x on a later (forced or random) write-back. */
#define WRITE_CACHED_VAR(x, v)		\
	atomic {			\
		cached_##x[get_pid()] = v;	\
		cache_dirty_##x[get_pid()] = 1;	\
	}
50 | ||
/* Flush process `id`'s cached x to memory if (and only if) it is dirty,
 * then clear the dirty bit. No-op on a clean cache line. */
#define CACHE_WRITE_TO_MEM(x, id)	\
	if				\
	:: IS_CACHE_DIRTY(x, id) ->	\
		mem_##x = cached_##x[id];	\
		cache_dirty_##x[id] = 0;	\
	:: else ->			\
		skip			\
	fi;

/* Refresh process `id`'s cached x from memory, but only when the cache is
 * clean — a dirty line must not lose its pending write. */
#define CACHE_READ_FROM_MEM(x, id)	\
	if				\
	:: !IS_CACHE_DIRTY(x, id) ->	\
		cached_##x[id] = mem_##x;\
	:: else ->			\
		skip			\
	fi;
67 | ||
/*
 * May update other caches if cache is dirty, or not.
 */
/* Non-deterministically flush x (or do nothing); models an unforced,
 * randomly timed cache write-back. */
#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
	if				\
	:: 1 -> CACHE_WRITE_TO_MEM(x, id);	\
	:: 1 -> skip			\
	fi;

/* Non-deterministically refresh x from memory (or do nothing); models a
 * randomly timed cache refill. */
#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
	if				\
	:: 1 -> CACHE_READ_FROM_MEM(x, id);	\
	:: 1 -> skip			\
	fi;
82 | ||
/* Read barrier: atomically refresh every clean cached variable from
 * memory. Keep the variable list in sync with ooo_mem()/smp_wmb(). */
inline smp_rmb()
{
	atomic {
		CACHE_READ_FROM_MEM(alpha, get_pid());
		CACHE_READ_FROM_MEM(beta, get_pid());
	}
}
90 | ||
/* Write barrier: atomically flush every dirty cached variable to memory.
 * Keep the variable list in sync with ooo_mem()/smp_rmb(). */
inline smp_wmb()
{
	atomic {
		CACHE_WRITE_TO_MEM(alpha, get_pid());
		CACHE_WRITE_TO_MEM(beta, get_pid());
	}
}
98 | ||
/* Full barrier: write-back then refresh, performed as one atomic step. */
inline smp_mb()
{
	atomic {
		smp_wmb();
		smp_rmb();
	}
}
106 | ||
/*
 * Instruction scheduling support. Declares instruction data flow dependency.
 * INSTRUCTION_SCHED_BEGIN/INSTRUCTION_SCHED_END can be nested.
 *
 * Mechanically: BEGIN opens a non-deterministic "if" whose first guard runs
 * ooo_mem(); each NODEP point expands to an additional "::" alternative of
 * that same "if", and END closes it with "fi". DEP_NEXT only inserts an
 * ooo_mem() step inline, without adding an alternative.
 * NOTE(review): the i argument is forwarded to ooo_mem(i); all call sites in
 * this file pass it empty, matching the parameterless ooo_mem() below.
 */
#define INSTRUCTION_SCHED_BEGIN(i)	\
	if				\
	:: 1 ->				\
		ooo_mem(i)

/* No data flow dependency between two consecutive instructions */
#define INSTRUCTION_SCHED_NODEP_NEXT(i)	\
	:: 1 ->				\
		ooo_mem(i)

/* Has dependency between two consecutive instructions */
#define INSTRUCTION_SCHED_DEP_NEXT(i)	\
	ooo_mem(i)

/* Closes the "if" opened by INSTRUCTION_SCHED_BEGIN. */
#define INSTRUCTION_SCHED_END(i)	\
	fi
127 | ||
/*
 * One out-of-order memory-subsystem step: each cached variable may, or may
 * not, be flushed to and/or refreshed from memory. Keep the variable list
 * in sync with smp_rmb()/smp_wmb().
 */
inline ooo_mem()
{
	atomic {
		RANDOM_CACHE_WRITE_TO_MEM(alpha, get_pid());
		RANDOM_CACHE_WRITE_TO_MEM(beta, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(alpha, get_pid());
		RANDOM_CACHE_READ_FROM_MEM(beta, get_pid());
	}
}
137 | ||
/* Keep in sync manually with smp_rmb, smp_wmb and ooo_mem */
DECLARE_CACHED_VAR(byte, alpha, 0);
DECLARE_CACHED_VAR(byte, beta, 0);

/* value 2 is uninitialized */
/* Each proctype stores the value it observed; 2 means "not yet read". */
byte read_one = 2;
byte read_two = 2;
145 | ||
/*
 * First half of the store-buffering litmus test: write alpha, wmb,
 * (optionally a sync_core-style serialization point,) rmb, read beta.
 * Mirrors test_proc_two, which writes beta and reads alpha.
 */
active proctype test_proc_one()
{
	/* _pid must index within the per-process cache arrays. */
	assert(get_pid() < NR_PROCS);

	INSTRUCTION_SCHED_BEGIN();
	WRITE_CACHED_VAR(alpha, 1);
	/* Data-flow dependency: the wmb must follow the write. */
	INSTRUCTION_SCHED_DEP_NEXT();
	smp_wmb();
#ifndef USE_SYNC_CORE
	/* No dependency between wmb and rmb: reordering point. */
	INSTRUCTION_SCHED_NODEP_NEXT();
#else
	// if we use a sync_core(); (equivalent to smp_mb())
	INSTRUCTION_SCHED_DEP_NEXT();
#endif
	smp_rmb();
	INSTRUCTION_SCHED_DEP_NEXT();
	read_one = READ_CACHED_VAR(beta);
	INSTRUCTION_SCHED_END();

	// test : [] (read_one == 0 -> read_two != 0)
	// test : [] (read_two == 0 -> read_one != 0)
	/* Both processes observing 0 is the forbidden SB outcome. */
	assert(!(read_one == 0 && read_two == 0));
}
169 | ||
/*
 * Second half of the store-buffering litmus test: write beta, wmb,
 * (optionally a sync_core-style serialization point,) rmb, read alpha.
 * Must stay symmetric with test_proc_one for the test to be meaningful.
 */
active proctype test_proc_two()
{
	/* _pid must index within the per-process cache arrays. */
	assert(get_pid() < NR_PROCS);

	INSTRUCTION_SCHED_BEGIN();
	WRITE_CACHED_VAR(beta, 1);
	/* Data-flow dependency: the wmb must follow the write. */
	INSTRUCTION_SCHED_DEP_NEXT();
	smp_wmb();
#ifndef USE_SYNC_CORE
	/* No dependency between wmb and rmb: reordering point. */
	INSTRUCTION_SCHED_NODEP_NEXT();
#else
	// if we use a sync_core(); (equivalent to smp_mb())
	/*
	 * Fix: this was commented out, unlike the identical point in
	 * test_proc_one. With USE_SYNC_CORE defined, this process would
	 * not model the serializing instruction, silently breaking the
	 * symmetry of the litmus test.
	 */
	INSTRUCTION_SCHED_DEP_NEXT();
#endif
	smp_rmb();
	INSTRUCTION_SCHED_DEP_NEXT();
	read_two = READ_CACHED_VAR(alpha);
	INSTRUCTION_SCHED_END();

	// test : [] (read_one == 0 -> read_two != 0)
	// test : [] (read_two == 0 -> read_one != 0)
	/* Both processes observing 0 is the forbidden SB outcome. */
	assert(!(read_one == 0 && read_two == 0));
}