/*
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))
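
/*
 * Illustration (not part of this header; "ready" and "data" are hypothetical
 * shared variables): placing the two ACCESS_ONCE() uses in separate C
 * statements keeps the compiler from reordering or merging them.
 *
 *	if (ACCESS_ONCE(ready))
 *		val = ACCESS_ONCE(data);
 */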
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)
/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})
/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v) \
	do { \
		(x) = (v); \
	} while (0)
/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
	do { \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
	} while (0)
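
/*
 * Pairing sketch ("flag" is a hypothetical shared variable): the writer's
 * STORE_SHARED() performs the cache flush that the reader's LOAD_SHARED()
 * expects before its load.
 *
 *	writer:	STORE_SHARED(flag, 1);
 *	reader:	while (!LOAD_SHARED(flag))
 *			continue;
 */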
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
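
/*
 * Read-side usage sketch ("gp", "struct foo" and do_something() are
 * hypothetical):
 *
 *	rcu_read_lock();
 *	struct foo *p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p->field);
 *	rcu_read_unlock();
 */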
#define SIGURCU SIGUSR1
/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000
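
/*
 * Writer-side sketch of the kick (simplified; reader_acked() and "tid" are
 * illustrative stand-ins for the bookkeeping done in the matching urcu.c):
 *
 *	for (loops = 0; !reader_acked(); loops++)
 *		if (loops == KICK_READER_LOOPS)
 *			pthread_kill(tid, SIGURCU);
 */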
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)
/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif
extern unsigned int yield_active;
extern unsigned int __thread rand_yield;
static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif
#ifdef DEBUG_FULL_MB
static inline void reader_barrier()
{
	smp_mb();
}
#else
static inline void reader_barrier()
{
	barrier();
}
#endif
/*
 * The trick here is that the bit position of RCU_GP_CTR_BIT must be a
 * multiple of 8, so we can use a full 8-bit, 16-bit or 32-bit bitmask for
 * the lower-order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
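
/*
 * Worked example, assuming a 64-bit long (sizeof(long) << 2 == 32):
 *
 *	RCU_GP_COUNT         = 0x0000000000000001
 *	RCU_GP_CTR_BIT       = 0x0000000100000000  (grace-period parity bit)
 *	RCU_GP_CTR_NEST_MASK = 0x00000000ffffffff  (nesting-count bits)
 *
 * A reader's urcu_active_readers therefore carries its read-side nesting
 * depth in the low half of the word and the grace-period parity it
 * snapshotted in bit 32.
 */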
/*
 * Global quiescent period counter with low-order bits unused.
 * Using a full word rather than a char to eliminate false register
 * dependencies causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		 ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
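
/*
 * Reading the return expression above: a reader still matters to the writer
 * only if (1) it is inside a read-side critical section (non-zero nesting
 * count in the low-order bits) and (2) the grace-period parity it snapshotted
 * differs from the current urcu_gp_ctr (the XOR isolates RCU_GP_CTR_BIT).
 * A reader with v == 0, for instance, is quiescent and never waited for.
 */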
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. The memory barrier in the
	 * signal handler ensures we receive the proper memory commit barriers
	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
	 * with the writer is needed.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
	else
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	reader_barrier();
}
static inline void rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using rcu before decrementing the pointer.
	 * See force_mb_all_threads().
	 */
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
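
/*
 * Nesting illustration (counter values are for a reader starting idle;
 * urcu_gp_ctr's low-order bits already contain RCU_GP_COUNT):
 *
 *	rcu_read_lock();	snapshot urcu_gp_ctr, nesting count = 1
 *	rcu_read_lock();	nesting count = 2
 *	rcu_read_unlock();	nesting count = 1
 *	rcu_read_unlock();	nesting count = 0, reader is quiescent again
 */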
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})
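
/*
 * Writer-side sketch ("gp" and "struct foo" are hypothetical): fully
 * initialize the structure before publishing it, so that readers doing
 * rcu_dereference(gp) never observe a partially-initialized object.
 *
 *	struct foo *new = malloc(sizeof(*new));
 *	new->field = 42;
 *	rcu_assign_pointer(gp, new);
 */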
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})
extern void synchronize_rcu(void);
/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})
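
/*
 * Typical update sequence (sketch; "gp" and "new" are hypothetical):
 *
 *	struct foo *old = urcu_publish_content(&gp, new);
 *	free(old);	safe: synchronize_rcu() waited for all readers
 */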
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
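
/*
 * Reader thread lifecycle sketch (illustrative; "stop", "gp" and consume()
 * are hypothetical):
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!LOAD_SHARED(stop)) {
 *			rcu_read_lock();
 *			struct foo *p = rcu_dereference(gp);
 *			if (p)
 *				consume(p);
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */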