#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Assume the architecture has coherent caches. Blackfin will want this unset.
 */
#define CONFIG_HAVE_MEM_COHERENCY 1

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1


#ifdef CONFIG_HAVE_MEM_COHERENCY
/*
 * Caches are coherent, no need to flush them.
 */
#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()
#else
#error "The architecture must create its own cache flush primitives"
#define mc()	arch_cache_flush()
#define rmc()	arch_cache_flush_read()
#define wmc()	arch_cache_flush_write()
#endif


#ifdef CONFIG_HAVE_MEM_COHERENCY

/* x86 32/64 specific */
#ifdef CONFIG_HAVE_FENCE
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence"::: "memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif

#else /* !CONFIG_HAVE_MEM_COHERENCY */

/*
 * Without cache coherency, the memory barriers become cache flushes.
 */
#define mb()	mc()
#define rmb()	rmc()
#define wmb()	wmc()

#endif /* !CONFIG_HAVE_MEM_COHERENCY */


#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 * x is considered local, ptr is considered remote.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	smp_wmc();
	return x;
}

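/*
 * Illustrative sketch (not part of this header): xchg() atomically replaces
 * the pointed-to value and returns the previous one, so a consumer can, for
 * instance, take over a shared singly linked list in one step. The
 * "struct node" type and "list_head" variable below are hypothetical.
 *
 *	struct node { struct node *next; int data; };
 *	struct node *list_head;
 *
 *	// Atomically detach the whole list for private processing:
 *	struct node *mine = xchg(&list_head, NULL);
 */
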
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

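/*
 * Illustrative sketch (not part of this header): ACCESS_ONCE() forces the
 * compiler to emit exactly one load per invocation, which is what a busy-wait
 * loop on a flag needs. The "stop" variable is hypothetical.
 *
 *	extern int stop;
 *
 *	while (!ACCESS_ONCE(stop))
 *		cpu_relax();
 */
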
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)	ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})


/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v) \
	do { \
		(x) = (v); \
	} while (0)

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
	do { \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
	} while (0)

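/*
 * Illustrative sketch (not part of this header): STORE_SHARED()/LOAD_SHARED()
 * pair a plain access with the cache-flush primitives so the data becomes
 * visible even on non-coherent configurations. The "shared_count" variable is
 * hypothetical.
 *
 *	extern long shared_count;
 *
 *	STORE_SHARED(shared_count, 42);			// writer side
 *	long snapshot = LOAD_SHARED(shared_count);	// reader side
 */
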
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

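/*
 * Illustrative sketch (not part of this header): a reader fetches the
 * RCU-protected pointer once with rcu_dereference() and then only uses the
 * local copy. The "struct config" type, "global_config" pointer and
 * "use_timeout()" helper are hypothetical.
 *
 *	struct config { int timeout; };
 *	extern struct config *global_config;
 *
 *	struct config *cfg = rcu_dereference(global_config);
 *	if (cfg)
 *		use_timeout(cfg->timeout);
 */
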
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void reader_barrier()
{
	smp_mb();
}
#else
static inline void reader_barrier()
{
	barrier();
}
#endif

/*
 * The trick here is that the bit position of RCU_GP_CTR_BIT must be a multiple
 * of 8, so the lower-order bits form a full 8-bit, 16-bit or 32-bit nesting
 * count bitmask.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture's long size. */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

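/*
 * Worked example (assuming a 64-bit long, i.e. sizeof(long) == 8):
 *	RCU_GP_CTR_BIT       = 1UL << 32 = 0x100000000
 *	RCU_GP_CTR_NEST_MASK = 0xffffffff
 * so the low 32 bits of urcu_active_readers hold the read-side nesting count
 * and bit 32 snapshots the grace-period phase. With a 32-bit long, the nesting
 * count uses the low 16 bits and bit 16 holds the phase.
 */
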
/*
 * Global quiescent period counter with low-order bits unused.
 * Using a long rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. The memory barrier in the
	 * signal handler ensures we receive the proper memory commit barriers
	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
	 * with the writer is needed.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
	else
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/*
	 * Increment the active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	reader_barrier();
}

static inline void rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using RCU-protected data before decrementing the active
	 * readers count. See force_mb_all_threads().
	 */
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}

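/*
 * Illustrative sketch (not part of this header): read-side critical sections
 * may nest. The outermost rcu_read_lock() snapshots urcu_gp_ctr into
 * urcu_active_readers; inner sections only bump the nesting count, and each
 * rcu_read_unlock() subtracts RCU_GP_COUNT again.
 *
 *	rcu_read_lock();	// outer: copies urcu_gp_ctr
 *	rcu_read_lock();	// nested: adds RCU_GP_COUNT
 *	...			// reads of RCU-protected data
 *	rcu_read_unlock();
 *	rcu_read_unlock();	// nesting count back to its pre-lock value
 */
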
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})

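/*
 * Illustrative update-side sketch (not part of this header): initialize the
 * structure fully, then publish it with rcu_assign_pointer() so readers using
 * rcu_dereference() never see a partially initialized object. The
 * "struct config" type and "global_config" pointer are hypothetical.
 *
 *	struct config { int timeout; };
 *	extern struct config *global_config;
 *
 *	struct config *new = malloc(sizeof(*new));
 *	new->timeout = 10;			// initialize first
 *	rcu_assign_pointer(global_config, new);	// then publish
 */
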
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

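/*
 * Illustrative sketch (not part of this header): urcu_publish_content()
 * replaces the shared pointer and waits for a grace period, so the previous
 * object can be freed immediately afterwards. Reuses the hypothetical
 * global_config example from above; note the pointer is passed by address.
 *
 *	struct config *new = malloc(sizeof(*new));
 *	new->timeout = 20;
 *	struct config *old = urcu_publish_content(&global_config, new);
 *	free(old);	// no reader can still reference it
 */
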
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);

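/*
 * Illustrative sketch (not part of this header): every reader thread must be
 * registered so the writer can signal it and wait for its quiescent states.
 *
 *	void *reader_thread(void *arg)	// hypothetical pthread start routine
 *	{
 *		urcu_register_thread();
 *		// ... read-side critical sections ...
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */
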
#endif /* _URCU_H */