[urcu.git] / urcu-call-rcu-impl.h
/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "config.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#ifdef HAVE_SCHED_GETCPU

static int urcu_sched_getcpu(void)
{
	return sched_getcpu();
}

#else /* #ifdef HAVE_SCHED_GETCPU */

static int urcu_sched_getcpu(void)
{
	return -1;
}

#endif /* #else #ifdef HAVE_SCHED_GETCPU */

#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}

static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	if (uatomic_read(&completion->futex) == -1)
		futex_async(&completion->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		futex_async(&completion->futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
	int ret;

	ret = set_thread_cpu_affinity(crdp);
	if (ret)
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourselves from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				poll(NULL, 0, 1);
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
		unsigned long flags,
		int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
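
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, both the lookup and any use of the returned pointer must stay
 * inside the same RCU read-side critical section. A hypothetical caller
 * fetching the tid of the call_rcu thread serving a given cpu could do:
 *
 *	pthread_t tid;
 *	struct call_rcu_data *crdp;
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(cpu);
 *	if (crdp)
 *		tid = get_call_rcu_thread(crdp);
 *	rcu_read_unlock();
 */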

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
		int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
		int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
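
/*
 * Illustrative sketch (not part of the original file): honoring the
 * grace-period requirement documented above when detaching and freeing
 * a per-CPU call_rcu_data structure.
 *
 *	struct call_rcu_data *old;
 *
 *	rcu_read_lock();
 *	old = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	if (old && set_cpu_call_rcu_data(cpu, NULL) == 0) {
 *		synchronize_rcu();
 *		call_rcu_data_free(old);
 *	}
 *
 * The synchronize_rcu() waits out any call_rcu() caller that may still
 * be using the detached structure under its read-side lock.
 */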

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
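
/*
 * Illustrative sketch (not part of the original file): giving the current
 * thread its own callback-invocation worker, e.g. to switch to a real-time
 * call_rcu thread as mentioned above. Error handling is omitted.
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(crdp);
 *	...
 *	set_thread_call_rcu_data(NULL);
 *	call_rcu_data_free(crdp);
 */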

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
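
/*
 * Illustrative sketch (not part of the original file): pairing the per-CPU
 * worker creation above with its teardown counterpart, typically around
 * application start-up and shutdown.
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		fprintf(stderr, "not using per-CPU call_rcu threads: %s\n",
 *			strerror(errno));
 *	...
 *	free_all_cpu_call_rcu_data();
 */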

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		void (*func)(struct rcu_head *head),
		struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	rcu_read_unlock();
}
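
/*
 * Illustrative sketch (not part of the original file): the usual call_rcu()
 * pattern of embedding a struct rcu_head in the protected object and freeing
 * the object from the callback. "struct foo" and foo_free_rcu() are
 * hypothetical caller-side names.
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 * After unpublishing p (for instance via rcu_assign_pointer() or a list
 * removal), a registered reader thread schedules reclamation with:
 *
 *	call_rcu(&p->rcu, foo_free_rcu);
 */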

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	uatomic_dec(&completion->barrier_count);
	call_rcu_completion_wake_up(completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling rcu_barrier() within an RCU read-side critical
	 * section is an error.
	 */
	if (rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Initialize completion state before queuing any callback. */
	completion.barrier_count = count;
	completion.futex = 0;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(1, sizeof(*work));
		if (!work)
			urcu_die(errno);
		work->completion = &completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion.futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion.barrier_count))
			break;
		call_rcu_completion_wait(&completion);
	}
online:
	if (was_online)
		rcu_thread_online();
}
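
/*
 * Illustrative sketch (not part of the original file): flushing pending
 * callbacks before tearing down state they may still reference, for
 * instance before unloading a plugin or destroying a hypothetical
 * allocator pool. As noted above, this must not be called from within
 * an RCU read-side critical section.
 *
 *	call_rcu(&p->rcu, foo_free_rcu);
 *	...
 *	rcu_barrier();
 *	destroy_foo_pool();
 *
 * Once rcu_barrier() returns, every previously queued callback has run.
 */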

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Clean up call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
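
/*
 * Illustrative sketch (not part of the original file): the three fork
 * handlers above are meant to be registered with pthread_atfork() by
 * applications mixing fork() with call_rcu(), typically once at start-up.
 * Error handling is reduced to a bare check.
 *
 *	ret = pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 *	if (ret)
 *		abort();
 */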