#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Assume an SMP machine, given we don't have this information */
#define CONFIG_SMP 1


#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif

#include "arch.h"

/* No-op everywhere except on Alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

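/*
 * Example (illustrative sketch; "flag" and "data" are hypothetical shared
 * variables): keeping the two ACCESS_ONCE() invocations in separate C
 * statements is what lets the compiler preserve their relative order:
 *
 *	while (!ACCESS_ONCE(flag))
 *		barrier();
 *	v = ACCESS_ONCE(data);
 */
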
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})


/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v) \
	do { \
		(x) = (v); \
	} while (0)

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
	do { \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
	} while (0)

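/*
 * Example (illustrative sketch; "ready" is a hypothetical variable shared
 * between the updater and readers): the updater publishes with STORE_SHARED(),
 * which performs the cache flush after the write, and readers poll with
 * LOAD_SHARED(), which flushes before the read, so the primitives stay paired:
 *
 *	STORE_SHARED(ready, 1);			updater side
 *
 *	while (!LOAD_SHARED(ready))		reader side
 *		barrier();
 */
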
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

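/*
 * Example (minimal usage sketch; "struct foo", the global "gp" and
 * do_something() are hypothetical): fetch the protected pointer once, then
 * use only the local copy. The rcu_read_lock()/rcu_read_unlock() pair is
 * defined below in this header.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p->a);
 *	rcu_read_unlock();
 */
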
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
 * a full 8-bit, 16-bit or 32-bit bitmask for the lower-order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

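/*
 * Worked example (assuming a 64-bit architecture, sizeof(long) == 8):
 * RCU_GP_CTR_BIT == 1UL << (8 << 2) == 1UL << 32, so the lower 32 bits of a
 * reader's counter hold its nesting count (RCU_GP_CTR_NEST_MASK ==
 * 0xffffffff) while bit 32 carries the grace-period phase. On a 32-bit
 * architecture the same formula yields 1UL << 16.
 */
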
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

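/*
 * Worked example (assuming 64-bit, so RCU_GP_CTR_BIT == 1UL << 32): a reader
 * whose counter reads 0 is outside any critical section and cannot delay the
 * grace period, so the first test fails. A reader with a nonzero nesting
 * count whose phase bit matches the current urcu_gp_ctr entered during the
 * current grace-period phase; the XOR clears RCU_GP_CTR_BIT and the second
 * test fails. Only a reader still nested in the previous phase returns 1.
 */
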
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. The memory barrier in the
	 * signal handler ensures we receive the proper memory commit barriers
	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
	 * with the writer is needed.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
	else
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	reader_barrier();
}

static inline void rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using RCU-protected data before decrementing the active
	 * readers count. See force_mb_all_threads().
	 */
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}

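/*
 * Example (illustrative sketch): read-side critical sections may nest. Only
 * the outermost rcu_read_lock() snapshots urcu_gp_ctr; inner ones just bump
 * the nesting count held in the low-order bits of urcu_active_readers:
 *
 *	rcu_read_lock();
 *	rcu_read_lock();	nested: adds RCU_GP_COUNT
 *	...
 *	rcu_read_unlock();
 *	rcu_read_unlock();
 */
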
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})

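/*
 * Example (minimal updater-side sketch; "struct foo" and the global "gp" are
 * hypothetical): fully initialize the structure, then publish it. The wmb()
 * inside rcu_assign_pointer() keeps the initializing stores ahead of the
 * pointer store:
 *
 *	struct foo *q = malloc(sizeof(*q));
 *	q->a = 1;
 *	rcu_assign_pointer(gp, q);
 */
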
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

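/*
 * Example (illustrative sketch; "gp" and "newptr" are hypothetical): swap in
 * the new version, wait for all pre-existing readers to finish, then reclaim
 * the old version:
 *
 *	struct foo *old;
 *
 *	old = urcu_publish_content((void **)&gp, newptr);
 *	free(old);
 */
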
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
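
/*
 * Example (illustrative reader-thread sketch; "stop" and the loop body are
 * hypothetical): every thread that enters read-side critical sections must
 * register before its first rcu_read_lock() and unregister before exiting,
 * so synchronize_rcu() knows which threads to wait for:
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!stop) {
 *			rcu_read_lock();
 *			...
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */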

#endif /* _URCU_H */