/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

//ust// #include <linux/cache.h>
//ust// #include <linux/spinlock.h>
//ust// #include <linux/threads.h>
//ust// #include <linux/percpu.h>
//ust// #include <linux/cpumask.h>
//ust// #include <linux/seqlock.h>
//ust// #include <linux/lockdep.h>
//ust// #include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

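/*
 * Usage sketch (illustrative only, not part of this header): an
 * RCU-protected object embeds a struct rcu_head so that it can later be
 * handed to call_rcu().  The names "struct foo", "foo_reclaim()" and the
 * use of container_of()/free() below are hypothetical.
 *
 *	struct foo {
 *		int key;
 *		int data;
 *		struct rcu_head rcu;	// storage for the deferred callback
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		// Recover the enclosing object from its rcu_head member.
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *		free(fp);
 *	}
 */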
//ust// #if defined(CONFIG_CLASSIC_RCU)
//ust// #include <linux/rcuclassic.h>
//ust// #elif defined(CONFIG_TREE_RCU)
//ust// #include <linux/rcutree.h>
//ust// #elif defined(CONFIG_PREEMPT_RCU)
//ust// #include <linux/rcupreempt.h>
//ust// #else
//ust// #error "Unknown RCU implementation specified to kernel configuration"
//ust// #endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
//ust//
//ust// #define RCU_HEAD_INIT { .next = NULL, .func = NULL }
//ust// #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
//ust// #define INIT_RCU_HEAD(ptr) do { \
//ust// 	(ptr)->next = NULL; (ptr)->func = NULL; \
//ust// } while (0)
//ust//
//ust// /**
//ust// * rcu_read_lock - mark the beginning of an RCU read-side critical section.
//ust// *
//ust// * When synchronize_rcu() is invoked on one CPU while other CPUs
//ust// * are within RCU read-side critical sections, then the
//ust// * synchronize_rcu() is guaranteed to block until after all the other
//ust// * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
//ust// * on one CPU while other CPUs are within RCU read-side critical
//ust// * sections, invocation of the corresponding RCU callback is deferred
//ust// * until after all the other CPUs exit their critical sections.
//ust// *
//ust// * Note, however, that RCU callbacks are permitted to run concurrently
//ust// * with RCU read-side critical sections. One way that this can happen
//ust// * is via the following sequence of events: (1) CPU 0 enters an RCU
//ust// * read-side critical section, (2) CPU 1 invokes call_rcu() to register
//ust// * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
//ust// * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
//ust// * callback is invoked. This is legal, because the RCU read-side critical
//ust// * section that was running concurrently with the call_rcu() (and which
//ust// * therefore might be referencing something that the corresponding RCU
//ust// * callback would free up) has completed before the corresponding
//ust// * RCU callback is invoked.
//ust// *
//ust// * RCU read-side critical sections may be nested. Any deferred actions
//ust// * will be deferred until the outermost RCU read-side critical section
//ust// * completes.
//ust// *
//ust// * It is illegal to block while in an RCU read-side critical section.
//ust// */
//ust// #define rcu_read_lock() __rcu_read_lock()
//ust//
//ust// /**
//ust// * rcu_read_unlock - marks the end of an RCU read-side critical section.
//ust// *
//ust// * See rcu_read_lock() for more information.
//ust// */
//ust//
//ust// /*
//ust// * So where is rcu_write_lock()? It does not exist, as there is no
//ust// * way for writers to lock out RCU readers. This is a feature, not
//ust// * a bug -- this property is what provides RCU's performance benefits.
//ust// * Of course, writers must coordinate with each other. The normal
//ust// * spinlock primitives work well for this, but any other technique may be
//ust// * used as well. RCU does not care how the writers keep out of each
//ust// * others' way, as long as they do so.
//ust// */
//ust// #define rcu_read_unlock() __rcu_read_unlock()
//ust//
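/*
 * Reader-side usage sketch (illustrative only, not part of this header):
 * a read-side critical section that fetches an RCU-protected pointer
 * with rcu_dereference() (documented further below) and uses it without
 * blocking.  The global pointer "gp", "struct foo" and
 * do_something_with() are hypothetical.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->data);	// must not block in here
 *	rcu_read_unlock();
 */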
//ust// /**
//ust// * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
//ust// *
//ust// * This is the equivalent of rcu_read_lock(), but to be used when updates
//ust// * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
//ust// * consider completion of a softirq handler to be a quiescent state,
//ust// * a process in an RCU read-side critical section must be protected by
//ust// * disabling softirqs. Read-side critical sections in interrupt context
//ust// * can use just rcu_read_lock().
//ust// *
//ust// */
//ust// #define rcu_read_lock_bh() __rcu_read_lock_bh()
//ust//
//ust// /*
//ust// * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
//ust// *
//ust// * See rcu_read_lock_bh() for more information.
//ust// */
//ust// #define rcu_read_unlock_bh() __rcu_read_unlock_bh()
//ust//
//ust// /**
//ust// * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
//ust// *
//ust// * Should be used with either
//ust// * - synchronize_sched()
//ust// * or
//ust// * - call_rcu_sched() and rcu_barrier_sched()
//ust// * on the write-side to ensure proper synchronization.
//ust// */
//ust// #define rcu_read_lock_sched() preempt_disable()
//ust// #define rcu_read_lock_sched_notrace() preempt_disable_notrace()
//ust//
//ust// /*
//ust// * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
//ust// *
//ust// * See rcu_read_lock_sched for more information.
//ust// */
//ust// #define rcu_read_unlock_sched() preempt_enable()
//ust// #define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
//ust//
//ust//
//ust//
//ust// /**
//ust// * rcu_dereference - fetch an RCU-protected pointer in an
//ust// * RCU read-side critical section. This pointer may later
//ust// * be safely dereferenced.
//ust// *
//ust// * Inserts memory barriers on architectures that require them
//ust// * (currently only the Alpha), and, more importantly, documents
//ust// * exactly which pointers are protected by RCU.
//ust// */
//ust//
//ust// #define rcu_dereference(p) ({ \
//ust// 		typeof(p) _________p1 = ACCESS_ONCE(p); \
//ust// 		smp_read_barrier_depends(); \
//ust// 		(_________p1); \
//ust// 	})
//ust//
//ust// /**
//ust// * rcu_assign_pointer - assign (publicize) a pointer to a newly
//ust// * initialized structure that will be dereferenced by RCU read-side
//ust// * critical sections. Returns the value assigned.
//ust// *
//ust// * Inserts memory barriers on architectures that require them
//ust// * (pretty much all of them other than x86), and also prevents
//ust// * the compiler from reordering the code that initializes the
//ust// * structure after the pointer assignment. More importantly, this
//ust// * call documents which pointers will be dereferenced by RCU read-side
//ust// * code.
//ust// */
//ust//
//ust// #define rcu_assign_pointer(p, v) \
//ust// 	({ \
//ust// 		if (!__builtin_constant_p(v) || \
//ust// 		    ((v) != NULL)) \
//ust// 			smp_wmb(); \
//ust// 		(p) = (v); \
//ust// 	})
//ust//
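/*
 * Updater-side usage sketch (illustrative only, not part of this header):
 * publish a fully initialized object with rcu_assign_pointer() so that
 * concurrent readers using rcu_dereference() never see a half-built
 * structure.  "gp", "struct foo" and its fields are hypothetical.
 *
 *	struct foo *new_fp = malloc(sizeof(*new_fp));
 *	new_fp->key = key;
 *	new_fp->data = data;		// initialize everything first...
 *	rcu_assign_pointer(gp, new_fp);	// ...then make it visible to readers
 */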
//ust// /* Infrastructure to implement the synchronize_() primitives. */
//ust//
//ust// struct rcu_synchronize {
//ust// 	struct rcu_head head;
//ust// 	struct completion completion;
//ust// };
//ust//
//ust// extern void wakeme_after_rcu(struct rcu_head *head);
//ust//
//ust// /**
//ust// * synchronize_sched - block until all CPUs have exited any non-preemptive
//ust// * kernel code sequences.
//ust// *
//ust// * This means that all preempt_disable code sequences, including NMI and
//ust// * hardware-interrupt handlers, in progress on entry will have completed
//ust// * before this primitive returns. However, this does not guarantee that
//ust// * softirq handlers will have completed, since in some kernels, these
//ust// * handlers can run in process context, and can block.
//ust// *
//ust// * This primitive provides the guarantees made by the (now removed)
//ust// * synchronize_kernel() API. In contrast, synchronize_rcu() only
//ust// * guarantees that rcu_read_lock() sections will have completed.
//ust// * In "classic RCU", these two guarantees happen to be one and
//ust// * the same, but can differ in realtime RCU implementations.
//ust// */
//ust// #define synchronize_sched() __synchronize_sched()
//ust//
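/*
 * Usage sketch (illustrative only, not part of this header): an updater
 * that unlinks an object protected by sched-flavor RCU, waits for all
 * preempt-disabled (rcu_read_lock_sched()) sections in flight to finish,
 * and only then reclaims it.  "gp" and free_foo() are hypothetical.
 *
 *	struct foo *old_fp = gp;
 *	rcu_assign_pointer(gp, NULL);	// unlink from readers
 *	synchronize_sched();		// wait for sched read-side sections
 *	free_foo(old_fp);		// no reader can still reference it
 */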
//ust// /**
//ust// * call_rcu - Queue an RCU callback for invocation after a grace period.
//ust// * @head: structure to be used for queueing the RCU updates.
//ust// * @func: actual update function to be invoked after the grace period
//ust// *
//ust// * The update function will be invoked some time after a full grace
//ust// * period elapses, in other words after all currently executing RCU
//ust// * read-side critical sections have completed. RCU read-side critical
//ust// * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
//ust// * and may be nested.
//ust// */
//ust// extern void call_rcu(struct rcu_head *head,
//ust// 		void (*func)(struct rcu_head *head));
//ust//
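/*
 * Usage sketch (illustrative only, not part of this header): replace an
 * RCU-protected object and defer freeing the old one until all current
 * readers are done, without blocking the updater.  This reuses the
 * hypothetical "struct foo", foo_reclaim(), "gp" and "new_fp" from the
 * sketches earlier in this file.
 *
 *	struct foo *old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);		// readers now see new_fp
 *	call_rcu(&old_fp->rcu, foo_reclaim);	// old_fp freed after grace period
 */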
//ust// /**
//ust// * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
//ust// * @head: structure to be used for queueing the RCU updates.
//ust// * @func: actual update function to be invoked after the grace period
//ust// *
//ust// * The update function will be invoked some time after a full grace
//ust// * period elapses, in other words after all currently executing RCU
//ust// * read-side critical sections have completed. call_rcu_bh() assumes
//ust// * that the read-side critical sections end on completion of a softirq
//ust// * handler. This means that read-side critical sections in process
//ust// * context must not be interrupted by softirqs. This interface is to be
//ust// * used when most of the read-side critical sections are in softirq context.
//ust// * RCU read-side critical sections are delimited by:
//ust// * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
//ust// * OR
//ust// * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
//ust// * These may be nested.
//ust// */
//ust// extern void call_rcu_bh(struct rcu_head *head,
//ust// 		void (*func)(struct rcu_head *head));
//ust//
//ust// /* Exported common interfaces */
//ust// extern void synchronize_rcu(void);
//ust// extern void rcu_barrier(void);
//ust// extern void rcu_barrier_bh(void);
//ust// extern void rcu_barrier_sched(void);
//ust//
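/*
 * Usage sketch (illustrative only, not part of this header): the
 * synchronous counterpart of the call_rcu() sketch above.  The updater
 * blocks in synchronize_rcu() until every pre-existing read-side
 * critical section has completed, then frees the old object directly.
 * "gp", "new_fp" and "struct foo" remain hypothetical.
 *
 *	struct foo *old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);	// publish the replacement
 *	synchronize_rcu();		// wait for all current readers
 *	free(old_fp);			// safe: no reader still holds old_fp
 */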
//ust// /* Internal to kernel */
//ust// extern void rcu_init(void);
//ust// extern int rcu_needs_cpu(int cpu);
//ust//
#endif /* __LINUX_RCUPDATE_H */