/*
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* x86 32/64 specific */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

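/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * two ACCESS_ONCE() invocations placed in separate C statements, so the
 * compiler must emit two distinct loads and may not merge or reorder them.
 * The helper name and its parameter are hypothetical.
 */
static inline int access_once_example(int *flag)
{
	int first = ACCESS_ONCE(*flag);		/* first load */
	int second = ACCESS_ONCE(*flag);	/* guaranteed separate load */

	return first == second;
}
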
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

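/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * fetch an RCU-protected pointer once with rcu_dereference() and reuse the
 * local snapshot. Real readers must also hold rcu_read_lock(), defined
 * later in this header. "struct example_node" and the parameter are
 * hypothetical.
 */
struct example_node {
	int data;
};

static inline int rcu_dereference_example(struct example_node **shared)
{
	struct example_node *snap = rcu_dereference(*shared);

	return snap ? snap->data : -1;
}
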
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per-se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void) { }
static inline void debug_yield_write(void) { }
static inline void debug_yield_init(void) { }
#endif

#ifdef DEBUG_FULL_MB
static inline void read_barrier(void) { smp_mb(); }
#else
static inline void read_barrier(void) { barrier(); }
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
 * a full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

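/*
 * Worked example (added for clarity, not part of the original header): on a
 * 64-bit architecture sizeof(long) == 8, so RCU_GP_CTR_BIT == 1UL << 32 and
 * RCU_GP_CTR_NEST_MASK covers the low 32 bits; on 32-bit the split is at
 * bit 16. The hypothetical helpers below only show how a reader counter
 * value decomposes into nesting depth and grace-period phase.
 */
static inline long urcu_ctr_nesting_example(long ctr)
{
	return ctr & RCU_GP_CTR_NEST_MASK;	/* read-side nesting depth */
}

static inline long urcu_ctr_phase_example(long ctr)
{
	return ctr & RCU_GP_CTR_BIT;		/* grace-period parity bit */
}
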
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = ACCESS_ONCE(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/* The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	/*
	 * Increment the active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	read_barrier();
}

static inline void rcu_read_unlock(void)
{
	read_barrier();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
}

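/*
 * Illustrative reader-side sketch (added for clarity, not part of the
 * original header): every access to an RCU-protected pointer is bracketed
 * by rcu_read_lock()/rcu_read_unlock(). The helper and its parameter are
 * hypothetical.
 */
static inline int urcu_reader_example(int **shared)
{
	int *p, v = -1;

	rcu_read_lock();
	p = rcu_dereference(*shared);
	if (p)
		v = *p;
	rcu_read_unlock();
	return v;
}
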
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
	})

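/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * the pointed-to data is fully initialized before rcu_assign_pointer()
 * publishes it; the wmb() keeps the initialization from being reordered
 * after the pointer store. The helper and its parameters are hypothetical.
 */
static inline void rcu_assign_pointer_example(int **shared, int *newp)
{
	*newp = 42;				/* initialize before publication */
	rcu_assign_pointer(*shared, newp);	/* wmb(), then pointer store */
}
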
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

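/*
 * Illustrative writer-side sketch (added for clarity, not part of the
 * original header): swap in a new value and free the old one only after
 * synchronize_rcu() has run inside urcu_publish_content(). Assumes
 * <stdlib.h> is available for malloc()/free(); the helper and its
 * parameters are hypothetical.
 */
static inline void urcu_writer_example(int **shared, int value)
{
	int *newp = malloc(sizeof(*newp));
	int *oldp;

	if (!newp)
		return;
	*newp = value;
	oldp = urcu_publish_content(shared, newp);	/* xchg + grace period */
	free(oldp);	/* safe: no reader can still reference oldp */
}
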
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);

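/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * a reader thread registers itself before its first read-side critical
 * section and unregisters before exiting. The function name and argument
 * handling are hypothetical; the signature matches pthread_create().
 */
static inline void *urcu_reader_thread_example(void *arg)
{
	int **shared = arg;	/* location of the RCU-protected pointer */
	int *p;

	urcu_register_thread();
	rcu_read_lock();
	p = rcu_dereference(*shared);
	(void)p;		/* read-side work would go here */
	rcu_read_unlock();
	urcu_unregister_thread();
	return NULL;
}
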