/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sched.h>

#include "compat-getcpu.h"
#include <urcu/wfcqueue.h>
#include <urcu/call-rcu.h>
#include <urcu/pointer.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/ref.h>
#include "urcu-die.h"
#include "urcu-utils.h"
#include "compat-smp.h"
#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK	(SET_AFFINITY_CHECK_PERIOD - 1)
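
/*
 * Because the check period is a power of two, the "time to re-check
 * affinity?" test below reduces to a mask rather than a modulo:
 * (++gp_count & SET_AFFINITY_CHECK_PERIOD_MASK) is non-zero for 255
 * out of every 256 increments, so the affinity check only runs once
 * per 256 grace periods.
 */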
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires to touch the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

enum crdf_flags {
	CRDF_FLAG_JOIN_THREAD = (1 << 0),
};
/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

static struct urcu_atfork *registered_rculfhash_atfork;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))
/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as
 * an RCU reader: it reads per_cpu_call_rcu_data and the per-cpu
 * pointer without holding a mutex. The call_rcu_mutex protects
 * updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long cpus_array_len;
static void cpus_array_len_reset(void)
{
	cpus_array_len = 0;
}
/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (cpus_array_len != 0)
		return;
	cpus_array_len = get_possible_cpus_array_len();
	if (cpus_array_len <= 0) {
		return;
	}
	p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}
#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */

static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long cpus_array_len = -1;

static void cpus_array_len_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}
/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
	return 0;
}
#endif
static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	while (uatomic_read(&crdp->futex) == -1) {
		if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior queued wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	while (uatomic_read(&completion->futex) == -1) {
		if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior queued wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
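
/*
 * The two wait/wake-up pairs above implement the same futex protocol.
 * A minimal sketch of the handshake, assuming a 32-bit futex word that
 * starts at 0 (names are illustrative, not part of this file):
 *
 *	waiter				waker
 *	------				-----
 *	uatomic_dec(&futex);		enqueue work
 *	cmm_smp_mb();			cmm_smp_mb();
 *	re-check work queue;		if (uatomic_read(&futex) == -1) {
 *	if still empty:				uatomic_set(&futex, 0);
 *		futex_async(&futex,		futex_async(&futex,
 *			FUTEX_WAIT, -1,			FUTEX_WAKE, 1,
 *			NULL, NULL, 0);			NULL, NULL, 0);
 *					}
 *
 * The paired memory barriers order the queue accesses against the
 * futex word accesses, so a wakeup posted after the waiter's re-check
 * cannot be lost: either the waiter sees the new work, or the waker
 * sees futex == -1 and issues FUTEX_WAKE.
 */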
/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || cpus_array_len <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
URCU_ATTR_ALIAS(urcu_stringify(get_cpu_call_rcu_data))
struct call_rcu_data *alias_get_cpu_call_rcu_data();
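
/*
 * Example (a minimal sketch, not compiled as part of this file): a
 * caller honoring the read-side-lock requirement documented above.
 *
 *	struct call_rcu_data *crdp;
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(urcu_sched_getcpu());
 *	if (crdp)
 *		... use crdp while still within the read-side ...
 *	rcu_read_unlock();
 */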
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
URCU_ATTR_ALIAS(urcu_stringify(get_call_rcu_thread))
pthread_t alias_get_call_rcu_thread();
/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

URCU_ATTR_ALIAS(urcu_stringify(create_call_rcu_data))
struct call_rcu_data *alias_create_call_rcu_data();
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || cpus_array_len <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
URCU_ATTR_ALIAS(urcu_stringify(set_cpu_call_rcu_data))
int alias_set_cpu_call_rcu_data();
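
/*
 * Example (a minimal sketch): replacing a CPU's call_rcu_data while
 * honoring the grace-period requirement documented above. "cpu" is the
 * caller's; error handling omitted.
 *
 *	struct call_rcu_data *old, *new;
 *
 *	new = create_call_rcu_data(0, cpu);
 *	rcu_read_lock();
 *	old = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	(void) set_cpu_call_rcu_data(cpu, NULL);
 *	synchronize_rcu();		(readers of "old" are now done)
 *	call_rcu_data_free(old);
 *	(void) set_cpu_call_rcu_data(cpu, new);
 */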
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}
URCU_ATTR_ALIAS(urcu_stringify(get_default_call_rcu_data))
struct call_rcu_data *alias_get_default_call_rcu_data();
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (cpus_array_len > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}
URCU_ATTR_ALIAS(urcu_stringify(get_call_rcu_data))
struct call_rcu_data *alias_get_call_rcu_data();
/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}
URCU_ATTR_ALIAS(urcu_stringify(get_thread_call_rcu_data))
struct call_rcu_data *alias_get_thread_call_rcu_data();
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
URCU_ATTR_ALIAS(urcu_stringify(set_thread_call_rcu_data))
void alias_set_thread_call_rcu_data();
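
/*
 * Example (a minimal sketch): temporarily switching the current thread
 * to a real-time call_rcu worker, as mentioned above, then back to the
 * shared default worker. The grace-period wait before freeing mirrors
 * the rule documented for set_cpu_call_rcu_data().
 *
 *	struct call_rcu_data *rt_crdp;
 *
 *	rt_crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(rt_crdp);
 *	... this thread's call_rcu() now uses the RT worker ...
 *	set_thread_call_rcu_data(NULL);
 *	synchronize_rcu();
 *	call_rcu_data_free(rt_crdp);
 */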
/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (cpus_array_len <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < cpus_array_len; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}

URCU_ATTR_ALIAS(urcu_stringify(create_all_cpu_call_rcu_data))
int alias_create_all_cpu_call_rcu_data();
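
/*
 * Example (a minimal sketch): creating one call_rcu worker per CPU at
 * startup and pairing it with teardown, as the comment above suggests.
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		fprintf(stderr, "per-CPU call_rcu workers unavailable\n");
 *	... application runs; call_rcu() picks the current CPU's worker ...
 *	free_all_cpu_call_rcu_data();
 */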
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}
static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu)) void alias_call_rcu();
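
/*
 * Example (a minimal sketch): the canonical call_rcu() pattern, with a
 * struct rcu_head embedded in the protected object. "struct foo" and
 * free_foo() are hypothetical application code.
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 * The updater, on a registered thread, unlinks "p" so no new readers
 * can find it, then hands it to the grace-period machinery:
 *
 *	call_rcu(&p->rcu, free_foo);
 */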
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
static
void _call_rcu_data_free(struct call_rcu_data *crdp, unsigned int flags)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_lock(&call_rcu_mutex);
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		call_rcu_unlock(&call_rcu_mutex);
		/* Create default call rcu data if need be. */
		/* CBs queued here will be handed to the default list. */
		(void) get_default_call_rcu_data();
		call_rcu_lock(&call_rcu_mutex);
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	if (flags & CRDF_FLAG_JOIN_THREAD) {
		int ret;

		ret = pthread_join(get_call_rcu_thread(crdp), NULL);
		if (ret)
			urcu_die(ret);
	}
	free(crdp);
}

URCU_ATTR_ALIAS(urcu_stringify(call_rcu_data_free))
void alias_call_rcu_data_free();

void call_rcu_data_free(struct call_rcu_data *crdp)
{
	_call_rcu_data_free(crdp, CRDF_FLAG_JOIN_THREAD);
}
/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (cpus_array_len <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * cpus_array_len);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < cpus_array_len; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

#ifdef RCU_QSBR
/* ABI6 has a non-namespaced free_all_cpu_call_rcu_data for qsbr */
#undef free_all_cpu_call_rcu_data
URCU_ATTR_ALIAS("urcu_qsbr_free_all_cpu_call_rcu_data")
void free_all_cpu_call_rcu_data();
#define free_all_cpu_call_rcu_data urcu_qsbr_free_all_cpu_call_rcu_data
#else
URCU_ATTR_ALIAS(urcu_stringify(free_all_cpu_call_rcu_data))
void alias_free_all_cpu_call_rcu_data();
#endif
static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}
static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}
/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling rcu_barrier() within an RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(sizeof(*work), 1);
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_barrier))
void alias_rcu_barrier();
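
/*
 * Example (a minimal sketch): draining callbacks during teardown.
 * rcu_barrier() only waits for callbacks queued before it is invoked,
 * so stop queueing new ones first.
 *
 *	... unpublish data structures, stop calling call_rcu() ...
 *	rcu_barrier();
 *	... all previously queued callbacks have now executed; it is
 *	safe to e.g. unload the module providing the callback code ...
 */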
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_before_fork))
void alias_call_rcu_before_fork();
/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_after_fork_parent))
void alias_call_rcu_after_fork_parent();
/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	cpus_array_len_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		/*
		 * Do not join the thread because it does not exist in
		 * the child.
		 */
		_call_rcu_data_free(crdp, 0);
	}
}
URCU_ATTR_ALIAS(urcu_stringify(call_rcu_after_fork_child))
void alias_call_rcu_after_fork_child();
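
/*
 * Example (a minimal sketch): wiring the three handlers above into
 * pthread_atfork(), for a program that fork()s without exec().
 *
 *	int ret;
 *
 *	ret = pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 *	if (ret)
 *		abort();
 */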
void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	if (CMM_LOAD_SHARED(registered_rculfhash_atfork))
		return;
	call_rcu_lock(&call_rcu_mutex);
	if (!registered_rculfhash_atfork)
		registered_rculfhash_atfork = atfork;
	call_rcu_unlock(&call_rcu_mutex);
}
URCU_ATTR_ALIAS(urcu_stringify(urcu_register_rculfhash_atfork))
void alias_urcu_register_rculfhash_atfork();
/*
 * This unregistration function is deprecated, meant only for internal
 * use by rculfhash.
 */
__attribute__((noreturn))
void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
{
	urcu_die(EPERM);
}

URCU_ATTR_ALIAS(urcu_stringify(urcu_unregister_rculfhash_atfork))
__attribute__((noreturn))
void alias_urcu_unregister_rculfhash_atfork();