// SPDX-FileCopyrightText: 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU library - batch memory reclamation with kernel API
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include <urcu/assert.h>
#include <urcu/wfcqueue.h>
#include <urcu/call-rcu.h>
#include <urcu/pointer.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/ref.h>
#include "urcu-die.h"
#include "urcu-utils.h"
#include "compat-smp.h"

#define SET_AFFINITY_CHECK_PERIOD	(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)

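/*
 * Note on the mask arithmetic: since the period is a power of two,
 * "count & SET_AFFINITY_CHECK_PERIOD_MASK" is equivalent to
 * "count % SET_AFFINITY_CHECK_PERIOD" (x & (2^n - 1) == x % 2^n), so
 * set_thread_cpu_affinity() below re-checks affinity only once every
 * 256 grace periods.
 */
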
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

enum crdf_flags {
	CRDF_FLAG_JOIN_THREAD = (1 << 0),
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

static struct urcu_atfork *registered_rculfhash_atfork;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long cpus_array_len;

static void cpus_array_len_reset(void)
{
	cpus_array_len = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (cpus_array_len != 0)
		return;
	cpus_array_len = get_possible_cpus_array_len();
	if (cpus_array_len <= 0) {
		return;
	}
	p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long cpus_array_len = -1;

static void cpus_array_len_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);

	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	while (uatomic_read(&crdp->futex) == -1) {
		if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

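/*
 * Wait/wake-up protocol summary (of the two functions above; the
 * completion variants below follow the same pattern): a call_rcu
 * worker advertises that it may block by decrementing its futex field
 * to -1 before re-checking its callback queue, and waits while the
 * value is still -1. An enqueuer observing -1 resets the field to 0
 * and issues FUTEX_WAKE. The cmm_smp_mb() calls order the queue
 * accesses against the futex word on both sides, so a wake-up cannot
 * be lost between the emptiness check and the FUTEX_WAIT.
 */
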
static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	while (uatomic_read(&completion->futex) == -1) {
		if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourselves from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;
	sigset_t newmask, oldmask;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	rcu_set_pointer(crdpp, crdp);

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);

	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || cpus_array_len <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}

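/*
 * Usage sketch (illustrative only, not part of the library): since the
 * returned pointer is RCU-protected, a caller would look it up and use
 * it entirely under the read-side lock, e.g.:
 *
 *	struct call_rcu_data *crdp;
 *	pthread_t tid;
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(urcu_sched_getcpu());
 *	if (crdp)
 *		tid = get_call_rcu_thread(crdp);
 *	rcu_read_unlock();
 */
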
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || cpus_array_len <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}

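/*
 * Usage sketch (illustrative only): replacing a CPU's call_rcu_data
 * while honouring the requirements above. "cpu" is assumed to be a
 * valid CPU number; error handling is elided. Note the intermediate
 * NULL assignment, since a non-NULL slot cannot be overwritten
 * directly (-EEXIST).
 *
 *	struct call_rcu_data *old_crdp, *new_crdp;
 *
 *	new_crdp = create_call_rcu_data(0, cpu);
 *	rcu_read_lock();
 *	old_crdp = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	(void) set_cpu_call_rcu_data(cpu, NULL);
 *	(void) set_cpu_call_rcu_data(cpu, new_crdp);
 *	synchronize_rcu();
 *	call_rcu_data_free(old_crdp);
 */
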
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.
 *
 * The call to this function with intent to use the returned
 * call_rcu_data should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	struct call_rcu_data *crdp;

	crdp = rcu_dereference(default_call_rcu_data);
	if (crdp != NULL)
		return crdp;

	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data == NULL)
		call_rcu_data_init(&default_call_rcu_data, 0, -1);
	crdp = default_call_rcu_data;
	call_rcu_unlock(&call_rcu_mutex);

	return crdp;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (cpus_array_len > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}

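/*
 * Usage sketch (illustrative only): giving the current thread a
 * dedicated real-time call_rcu worker, then detaching and disposing
 * of it later per the rules above.
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(crdp);
 *	...
 *	set_thread_call_rcu_data(NULL);
 *	synchronize_rcu();
 *	call_rcu_data_free(crdp);
 */
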
/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (cpus_array_len <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < cpus_array_len; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}

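/*
 * Usage sketch (illustrative only): spawn the per-CPU workers early in
 * main() and pair them with teardown before process exit.
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		perror("create_all_cpu_call_rcu_data");
 *	...
 *	rcu_barrier();
 *	free_all_cpu_call_rcu_data();
 */
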
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you
 * need the first invocation of call_rcu() to be fast, make sure
 * to create a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}

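/*
 * Usage sketch (illustrative only): embed a struct rcu_head in the
 * structure to be reclaimed and free it from the callback once a grace
 * period has elapsed. The "foo"/"free_foo" names are hypothetical.
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 * After removing "p" from all RCU-visible data structures, a
 * registered reader thread queues reclaim with:
 *
 *	call_rcu(&p->rcu, free_foo);
 */
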
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
static
void _call_rcu_data_free(struct call_rcu_data *crdp, unsigned int flags)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_lock(&call_rcu_mutex);
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		call_rcu_unlock(&call_rcu_mutex);
		/* Create default call rcu data if need be. */
		/* CBs queued here will be handed to the default list. */
		(void) get_default_call_rcu_data();
		call_rcu_lock(&call_rcu_mutex);
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	if (flags & CRDF_FLAG_JOIN_THREAD) {
		int ret;

		ret = pthread_join(get_call_rcu_thread(crdp), NULL);
		if (ret)
			urcu_die(ret);
	}
	free(crdp);
}

void call_rcu_data_free(struct call_rcu_data *crdp)
{
	_call_rcu_data_free(crdp, CRDF_FLAG_JOIN_THREAD);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (cpus_array_len <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * cpus_array_len);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling rcu_barrier() within an RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(1, sizeof(*completion));
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(1, sizeof(*work));
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}

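/*
 * Usage sketch (illustrative only): a library that queues call_rcu
 * callbacks can flush them in its destructor so that no callback runs
 * after its code is unloaded. The "mylib_fini" name is hypothetical.
 *
 *	__attribute__((destructor))
 *	static void mylib_fini(void)
 *	{
 *		rcu_barrier();
 *	}
 */
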
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	cpus_array_len_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		/*
		 * Do not join the thread because it does not exist in
		 * the child.
		 */
		_call_rcu_data_free(crdp, 0);
	}
}

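/*
 * Usage sketch (illustrative only): an application that forks while
 * using call_rcu() can register the three handlers above once, early
 * in main():
 *
 *	(void) pthread_atfork(call_rcu_before_fork,
 *		call_rcu_after_fork_parent,
 *		call_rcu_after_fork_child);
 */
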
void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	if (CMM_LOAD_SHARED(registered_rculfhash_atfork))
		return;
	call_rcu_lock(&call_rcu_mutex);
	if (!registered_rculfhash_atfork)
		registered_rculfhash_atfork = atfork;
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * This unregistration function is deprecated, meant only for internal
 * use by rculfhash.
 */
__attribute__((__noreturn__))
void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
	urcu_die(EPERM);
}

/*
 * Tear down the default call_rcu worker thread if there are no queued
 * callbacks on process exit. This prevents leaking memory.
 *
 * Here is how an application can ensure graceful teardown of this
 * worker thread:
 *
 * - An application queuing call_rcu callbacks should invoke
 *   rcu_barrier() before it exits.
 * - When chaining call_rcu callbacks, the number of calls to
 *   rcu_barrier() on application exit must match at least the maximum
 *   number of chained callbacks.
 * - If an application chains callbacks endlessly, it would have to be
 *   modified to stop chaining callbacks when it detects an application
 *   exit (e.g. with a flag), and wait for quiescence with rcu_barrier()
 *   after setting that flag.
 * - The statements above apply to a library which queues call_rcu
 *   callbacks, only it needs to invoke rcu_barrier() in its library
 *   destructor.
 *
 * Note that this function does not presume it is being called when the
 * application is single-threaded even though this is invoked from a
 * destructor: this function synchronizes against concurrent calls to
 * get_default_call_rcu_data().
 */
static void urcu_call_rcu_exit(void)
{
	struct call_rcu_data *crdp;
	bool teardown = true;

	if (default_call_rcu_data == NULL)
		return;
	call_rcu_lock(&call_rcu_mutex);
	/*
	 * If the application leaves callbacks in the default call_rcu
	 * worker queue, keep the default worker in place.
	 */
	crdp = default_call_rcu_data;
	if (!crdp) {
		teardown = false;
		goto unlock;
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		teardown = false;
		goto unlock;
	}
	rcu_set_pointer(&default_call_rcu_data, NULL);
unlock:
	call_rcu_unlock(&call_rcu_mutex);
	if (teardown) {
		synchronize_rcu();
		call_rcu_data_free(crdp);
	}
}
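
/*
 * Usage sketch (illustrative only): a callback that chains one further
 * call_rcu() invocation needs two rcu_barrier() calls at exit, one per
 * level of chaining, as described above. The "stage1"/"stage2" names
 * are hypothetical and "struct foo" is as in the earlier call_rcu()
 * sketch.
 *
 *	static void stage2(struct rcu_head *head)
 *	{
 *		free(caa_container_of(head, struct foo, rcu));
 *	}
 *
 *	static void stage1(struct rcu_head *head)
 *	{
 *		call_rcu(head, stage2);
 *	}
 *
 * At exit:
 *
 *	rcu_barrier();	first call waits for stage1 callbacks
 *	rcu_barrier();	second call waits for chained stage2 callbacks
 */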