#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credit to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* x86 32/64 specific */
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
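
/*
 * A minimal sketch of how these barriers pair up ("data" and "flag" are
 * hypothetical shared variables, not part of this header): the writer
 * orders its stores with wmb(), the reader orders its loads with rmb().
 *
 *	// writer			// reader
 *	data = 42;			while (!flag)
 *	wmb();					barrier();
 *	flag = 1;			rmb();
 *					assert(data == 42);
 */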

/* Atomically increment *v, using an x86 lock-prefixed increment. */
static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))
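
/*
 * For instance (a sketch; "ready" is a hypothetical flag shared with
 * another thread or a signal handler), forcing one load per iteration
 * rather than letting the compiler hoist the read out of the loop:
 *
 *	while (!ACCESS_ONCE(ready))
 *		;	// each iteration reloads "ready" from memory
 */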

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p) ({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})
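
/*
 * Typical read-side usage (a sketch; "shared_ptr", "struct mydata" and
 * "do_something_with" are hypothetical):
 *
 *	int parity;
 *
 *	rcu_read_lock(&parity);
 *	struct mydata *p = rcu_dereference(shared_ptr);
 *	if (p)
 *		do_something_with(p->field);
 *	rcu_read_unlock(&parity);
 */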

#define SIGURCU SIGUSR1

/*
 * With DEBUG_YIELD defined, sched_yield() calls are injected at
 * instrumented points when the corresponding bit (YIELD_READ or
 * YIELD_WRITE) is set in the yield_active mask, which widens race
 * windows for testing.
 */
#ifdef DEBUG_YIELD
#include <sched.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

extern int yield_active;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		sched_yield();
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		sched_yield();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}
#endif

/* Global quiescent period parity */
extern int urcu_qparity;

extern int __thread urcu_active_readers[2];

static inline int get_urcu_qparity(void)
{
	return urcu_qparity;
}

/*
 * urcu_parity should be declared on the caller's stack.
 */
static inline void rcu_read_lock(int *urcu_parity)
{
	debug_yield_read();
	*urcu_parity = get_urcu_qparity();
	debug_yield_read();
	urcu_active_readers[*urcu_parity]++;
	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	debug_yield_read();
}

static inline void rcu_read_unlock(int *urcu_parity)
{
	debug_yield_read();
	barrier();
	debug_yield_read();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers[*urcu_parity]--;
	debug_yield_read();
}
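
/*
 * Note that the same urcu_parity variable must be passed to a matching
 * rcu_read_lock()/rcu_read_unlock() pair: the lock records the global
 * parity it incremented, so the unlock decrements the same counter slot
 * even if the global parity flips in between.  A sketch:
 *
 *	int parity;
 *
 *	rcu_read_lock(&parity);
 *	// ... read-side critical section ...
 *	rcu_read_unlock(&parity);	// same "parity" token
 */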

extern void *urcu_publish_content(void **ptr, void *new);
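
/*
 * Writer-side sketch (assumptions: "shared_ptr" and "new_data" are
 * hypothetical, and urcu_publish_content() replaces *ptr with new, waits
 * for a grace period, and returns the old pointer, which is then safe
 * to free):
 *
 *	struct mydata *old;
 *
 *	old = urcu_publish_content((void **)&shared_ptr, new_data);
 *	free(old);
 */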

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
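
/*
 * Every thread that runs read-side critical sections must register
 * before its first rcu_read_lock() and unregister before exiting
 * (a sketch; the thread function and its body are hypothetical):
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		// ... rcu_read_lock()/rcu_read_unlock() sections ...
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */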

#endif /* _URCU_H */