/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"
#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK		(SET_AFFINITY_CHECK_PERIOD - 1)
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};
/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is an RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as an
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without holding the mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;
static void maxcpus_reset(void)
{
	maxcpus = 0;
}
/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0)
		return;
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}
#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}
/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif
static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) != -1)
		return;
	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
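/*
 * Note on the futex protocol above (a descriptive summary, not new
 * behavior): a call_rcu thread that finds its queue empty publishes
 * -1 in its futex and sleeps in FUTEX_WAIT; a producer that observes
 * -1 resets the futex to 0 and issues FUTEX_WAKE. The memory barriers
 * pair the queue accesses with the futex accesses, so a wakeup cannot
 * be lost between the emptiness check and the sleep.
 */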
static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	if (uatomic_read(&completion->futex) != -1)
		return;
	while (futex_async(&completion->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}
/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by an RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
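/*
 * Illustrative caller sketch (hypothetical code, not part of this
 * file): the lookup and any use of the returned pointer stay inside
 * one RCU read-side critical section, as documented above.
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(urcu_sched_getcpu());
 *	if (crdp != NULL) {
 *		// ... use crdp while still within the critical section ...
 *	}
 *	rcu_read_unlock();
 */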
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
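/*
 * Illustrative usage sketch (hypothetical caller code): create a
 * call_rcu worker pinned to CPU 0 and route the current thread's
 * callbacks to it. Flag value 0 requests a non-real-time worker.
 *
 *	struct call_rcu_data *my_crdp;
 *
 *	my_crdp = create_call_rcu_data(0, 0);
 *	set_thread_call_rcu_data(my_crdp);
 *	// ... later, to tear down (per the rules documented below):
 *	set_thread_call_rcu_data(NULL);
 *	synchronize_rcu();
 *	call_rcu_data_free(my_crdp);
 */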
/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
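/*
 * Illustrative sketch of the removal protocol documented above
 * (hypothetical caller code):
 *
 *	struct call_rcu_data *old;
 *
 *	rcu_read_lock();
 *	old = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	set_cpu_call_rcu_data(cpu, NULL);
 *	synchronize_rcu();	// grace period before freeing "old"
 *	call_rcu_data_free(old);
 */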
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by an RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}
/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
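/*
 * Illustrative sketch (hypothetical caller code): temporarily route
 * this thread's callbacks to a real-time call_rcu worker, then
 * restore the previous assignment.
 *
 *	struct call_rcu_data *rt_crdp, *saved;
 *
 *	rt_crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	saved = get_thread_call_rcu_data();
 *	set_thread_call_rcu_data(rt_crdp);
 *	// ... enqueue latency-sensitive callbacks with call_rcu() ...
 *	set_thread_call_rcu_data(saved);
 *	synchronize_rcu();	// grace period before freeing, as documented
 *	call_rcu_data_free(rt_crdp);
 */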
/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to teardown these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);
			/* it has been created by other thread */
			if (ret == -EEXIST)
				continue;
			return ret;
		}
	}
	return 0;
}
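/*
 * Illustrative startup/teardown sketch (hypothetical caller code):
 *
 *	if (create_all_cpu_call_rcu_data(0))	// 0: non-real-time workers
 *		perror("create_all_cpu_call_rcu_data");
 *	// ... application runs; call_rcu() spreads work per CPU ...
 *	free_all_cpu_call_rcu_data();
 */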
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}
static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
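/*
 * Illustrative usage sketch (hypothetical caller code): embed a
 * struct rcu_head in the protected object and reclaim it through
 * call_rcu() once a grace period has elapsed.
 *
 *	struct mynode {			// hypothetical user type
 *		int key;
 *		struct rcu_head rcu_head;
 *	};
 *
 *	static void free_mynode(struct rcu_head *head)
 *	{
 *		struct mynode *node =
 *			caa_container_of(head, struct mynode, rcu_head);
 *
 *		free(node);
 *	}
 *
 *	// After making "node" unreachable to new readers:
 *	call_rcu(&node->rcu_head, free_mynode);
 */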
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}
/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}
static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}
static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}
/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling a rcu_barrier() within a RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(sizeof(*work), 1);
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
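/*
 * Illustrative usage sketch (hypothetical caller code): flush all
 * pending callbacks before tearing down a module or shared object.
 *
 *	call_rcu(&node->rcu_head, free_mynode);	// hypothetical callback
 *	// ...
 *	rcu_barrier();	// all previously queued callbacks have now run
 *	// now safe to unload code referenced by the callbacks
 */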
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}
/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	call_rcu_unlock(&call_rcu_mutex);
}
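/*
 * Illustrative registration sketch (hypothetical caller code): wire
 * these handlers up once, early in main(), so fork() is safe in
 * programs that use call_rcu().
 *
 *	pthread_atfork(call_rcu_before_fork,
 *		       call_rcu_after_fork_parent,
 *		       call_rcu_after_fork_child);
 */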
/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}