/*
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2.
 */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

/* x86 32/64 specific */
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")
#define wmb()   asm volatile("sfence":::"memory")
static inline void atomic_inc(int *v)
{
        asm volatile("lock; incl %0"
                     : "+m" (*v));
}
#define xchg(ptr, v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
        unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *         but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
                             : "=q" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
        return x;
}
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)  (*(volatile typeof(x) *)&(x))
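
/*
 * Illustrative sketch, not from the original header (shared_flag and
 * shared_data are hypothetical): putting the two ACCESS_ONCE() invocations
 * in separate C statements, as described above, keeps the compiler from
 * merging or reordering them:
 *
 *      flag = ACCESS_ONCE(shared_flag);
 *      if (flag)
 *              data = ACCESS_ONCE(shared_data);
 */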
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)     ({ \
                                typeof(p) _________p1 = ACCESS_ONCE(p); \
                                smp_read_barrier_depends(); \
                                (_________p1); \
                                })
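
/*
 * Usage sketch, not from the original header (struct foo, shared_foo and
 * use() are hypothetical).  rcu_dereference() is meant to be called from
 * within a read-side critical section (see rcu_read_lock()/rcu_read_unlock()
 * below):
 *
 *      struct foo *p;
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(shared_foo);
 *      if (p)
 *              use(p);
 *      rcu_read_unlock();
 */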
#define SIGURCU SIGUSR1
/*
 * Debug yield support: when built with DEBUG_YIELD, randomly call
 * sched_yield() at instrumentation points to widen race windows.
 */
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>

#define YIELD_READ      (1 << 0)
#define YIELD_WRITE     (1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
        if (yield_active & YIELD_READ)
                if (rand_r(&rand_yield) & 0x1)
                        sched_yield();
}

static inline void debug_yield_write(void)
{
        if (yield_active & YIELD_WRITE)
                if (rand_r(&rand_yield) & 0x1)
                        sched_yield();
}

static inline void debug_yield_init(void)
{
        rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif
/*
 * Limiting the nesting level to 256 to keep instructions small in the read
 * fast-path.
 */
#define RCU_GP_COUNT            (1U << 0)
#define RCU_GP_CTR_BIT          (1U << 8)
#define RCU_GP_CTR_NEST_MASK    (RCU_GP_CTR_BIT - 1)
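
/*
 * Layout sketch, not from the original header: with the constants above,
 * each per-thread urcu_active_readers word (see below) decomposes as:
 *
 *      bits 0-7  (RCU_GP_CTR_NEST_MASK)  read-side nesting count
 *      bit  8    (RCU_GP_CTR_BIT)        snapshot of the global grace
 *                                        period parity, taken at the
 *                                        outermost rcu_read_lock()
 */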
/* Global quiescent period counter with low-order bits unused. */
extern int urcu_gp_ctr;

extern int __thread urcu_active_readers;
static inline int rcu_old_gp_ongoing(int *value)
{
        int v;

        if (value == NULL)
                return 0;
        debug_yield_write();
        v = ACCESS_ONCE(*value);
        debug_yield_write();
        /*
         * A reader holds up the grace period if it is within a read-side
         * critical section (non-zero nesting count) and its snapshot of
         * the grace period parity bit differs from the current one.
         */
        return (v & RCU_GP_CTR_NEST_MASK) &&
                ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}
static inline void rcu_read_lock(void)
{
        int tmp;

        debug_yield_read();
        /* urcu_active_readers is thread-local; plain loads/stores suffice. */
        tmp = urcu_active_readers;
        debug_yield_read();
        /* Outermost lock snapshots the global counter; nested locks add one. */
        if (!(tmp & RCU_GP_CTR_NEST_MASK))
                urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
        else
                urcu_active_readers = tmp + RCU_GP_COUNT;
        debug_yield_read();
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        barrier();
        debug_yield_read();
}
static inline void rcu_read_unlock(void)
{
        debug_yield_read();
        barrier();
        debug_yield_read();
        /*
         * Finish using rcu before decrementing the active readers count.
         * See force_mb_all_threads().
         */
        urcu_active_readers -= RCU_GP_COUNT;
        debug_yield_read();
}
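
/*
 * Nesting sketch, not from the original header: read-side critical sections
 * nest up to 256 levels, each rcu_read_lock() adding RCU_GP_COUNT to the
 * nesting byte and each rcu_read_unlock() subtracting it:
 *
 *      rcu_read_lock();
 *      rcu_read_lock();        nesting count is now 2
 *      rcu_read_unlock();
 *      rcu_read_unlock();      reader is quiescent again
 */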
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                (p) = (v); \
        })

#define rcu_xchg_pointer(p, v) \
        ({ \
                if (!__builtin_constant_p(v) || \
                    ((v) != NULL)) \
                        wmb(); \
                xchg(p, v); \
        })
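
/*
 * Writer-side sketch, not from the original header (struct foo, shared_foo
 * and the field are hypothetical).  The wmb() issued by rcu_assign_pointer()
 * orders the structure initialization before its publication:
 *
 *      struct foo *new = malloc(sizeof(*new));
 *
 *      new->field = 42;
 *      rcu_assign_pointer(shared_foo, new);
 */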
extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
        ({ \
                void *oldptr; \
                debug_yield_write(); \
                oldptr = rcu_xchg_pointer(p, v); \
                synchronize_rcu(); \
                oldptr; \
        })
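
/*
 * Usage sketch, not from the original header (shared_foo and new are
 * hypothetical).  Since urcu_publish_content() returns only once a
 * quiescent state has been observed, the old pointer may be freed right
 * away:
 *
 *      struct foo *old;
 *
 *      old = urcu_publish_content(&shared_foo, new);
 *      free(old);
 */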
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
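
/*
 * Reader thread sketch, not from the original header (reader_fn and its use
 * are hypothetical).  Every reader thread must register before entering its
 * first read-side critical section and unregister before exiting:
 *
 *      void *reader_fn(void *arg)
 *      {
 *              urcu_register_thread();
 *              rcu_read_lock();
 *              ... dereference and read shared data ...
 *              rcu_read_unlock();
 *              urcu_unregister_thread();
 *              return NULL;
 *      }
 */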