 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
/* Assume an SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))
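
/*
 * Example (illustration only; the variable names are hypothetical and not
 * part of this header): putting the two ACCESS_ONCE() invocations in
 * separate C statements makes their intended ordering visible to the
 * compiler:
 *
 *	new_gen = ACCESS_ONCE(generation);
 *	if (new_gen != last_gen)
 *		data_snapshot = ACCESS_ONCE(shared_data);
 */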
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)	ACCESS_ONCE(p)
/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})
/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v) \
	do { \
		ACCESS_ONCE(x) = (v); \
	} while (0)
/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
	do { \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
	} while (0)
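
/*
 * Example (sketch; "ready_flag" is a hypothetical shared variable): the
 * writer publishes with STORE_SHARED() so the cache flush follows the store,
 * and the reader picks the value up with LOAD_SHARED() so the flush precedes
 * the load:
 *
 *	STORE_SHARED(ready_flag, 1);		writer side
 *
 *	if (LOAD_SHARED(ready_flag))		reader side
 *		consume_data();
 */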
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
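
/*
 * Example (sketch; "struct node", "shared_head" and "use()" are hypothetical
 * names): fetch an RCU-protected pointer inside a read-side critical section
 * before dereferencing it:
 *
 *	struct node *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_head);
 *	if (p)
 *		use(p->data);
 *	rcu_read_unlock();
 */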
#define SIGURCU SIGUSR1
/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000
#ifdef DEBUG_YIELD
#include <time.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)
/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
extern unsigned int yield_active;
extern unsigned int __thread rand_yield;
static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif
#ifdef DEBUG_FULL_MB
static inline void reader_barrier()
{
	smp_mb();
}
#else
static inline void reader_barrier()
{
	barrier();
}
#endif
/*
 * The trick here is that the RCU_GP_CTR_BIT bit position must be a multiple
 * of 8, so the lower-order bits can be tested with a full 8-bit, 16-bit or
 * 32-bit bitmask.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
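
/*
 * Worked example (illustration): on a 64-bit architecture, sizeof(long) == 8,
 * so RCU_GP_CTR_BIT == 1UL << 32. The low 32 bits (RCU_GP_CTR_NEST_MASK) hold
 * the reader nesting count, incremented in steps of RCU_GP_COUNT, and bit 32
 * acts as the grace-period phase bit flipped by the writer. On a 32-bit
 * architecture the split is 16/16 instead.
 */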
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
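
/*
 * Example (illustration of the test above): a reader whose snapshot has a
 * non-zero nesting count (v & RCU_GP_CTR_NEST_MASK) and whose phase bit
 * differs from the current urcu_gp_ctr ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT)
 * entered its critical section before the writer flipped the phase, so the
 * writer must keep waiting for it. A reader with a zero nesting count, or one
 * that has already observed the new phase, no longer blocks the grace period.
 */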
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. The memory barrier in the
	 * signal handler ensures we receive the proper memory commit barriers
	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
	 * with the writer is needed.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
	else
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	reader_barrier();
}
static inline void rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
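
/*
 * Example (sketch; the function names are hypothetical): read-side critical
 * sections may nest, which is what the RCU_GP_CTR_NEST_MASK accounting above
 * supports:
 *
 *	rcu_read_lock();
 *	walk_rcu_protected_list();
 *	rcu_read_lock();		nested: only the count is incremented
 *	lookup_rcu_protected_item();
 *	rcu_read_unlock();
 *	rcu_read_unlock();		outermost unlock ends the section
 */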
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	do { \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	} while (0)
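
/*
 * Example (sketch; "struct node", "new" and "shared_head" are hypothetical
 * names): fully initialize the structure before publishing it, so readers
 * doing rcu_dereference(shared_head) never see a partially initialized node:
 *
 *	struct node *new = malloc(sizeof(*new));
 *
 *	new->data = 42;
 *	new->next = NULL;
 *	rcu_assign_pointer(shared_head, new);
 */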
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})
extern void synchronize_rcu(void);
/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})
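
/*
 * Example (sketch; the names are hypothetical): replace the currently
 * published structure and reclaim the old copy once no pre-existing reader
 * can still hold a reference to it:
 *
 *	struct node *old;
 *
 *	old = urcu_publish_content(&shared_head, new);
 *	free(old);
 */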
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
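
/*
 * Example (sketch; "NR_READS" and the data access are hypothetical): lifetime
 * of a reader thread. Each thread that uses rcu_read_lock()/rcu_read_unlock()
 * registers itself first, so the writer's grace-period detection (and the
 * SIGURCU signal) can reach it, and unregisters before exiting:
 *
 *	void *reader_thread(void *arg)
 *	{
 *		int i;
 *
 *		urcu_register_thread();
 *		for (i = 0; i < NR_READS; i++) {
 *			rcu_read_lock();
 *			... access RCU-protected data ...
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */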