/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <sched.h>

#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	struct cds_wfq_queue cbs;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static __thread struct call_rcu_data *thread_call_rcu_data;

/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0)
		return;
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		per_cpu_call_rcu_data = p;
	} else {
		if (!warned)
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		warned = 1;
	}
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below,
 * used both for cases where the cpu number is and is not available,
 * assume it is not constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
	return -1;
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_lock(pmp) != 0) {
		perror("pthread_mutex_lock");
		exit(-1);
	}
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_unlock(pmp) != 0) {
		perror("pthread_mutex_unlock");
		exit(-1);
	}
}

#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
		      NULL, NULL, 0);
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
		      NULL, NULL, 0);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
	struct rcu_head *rhp;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp) != 0) {
		perror("pthread_setaffinity_np");
		exit(-1);
	}

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	thread_call_rcu_data = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
				poll(NULL, 0, 1);
			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
			cbs_tail = (struct cds_wfq_node **)
				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
			synchronize_rcu();
			cbcount = 0;
			do {
				while (cbs->next == NULL &&
				       &cbs->next != cbs_tail)
					poll(NULL, 0, 1);
				if (cbs == &crdp->cbs.dummy) {
					cbs = cbs->next;
					continue;
				}
				rhp = (struct rcu_head *)cbs;
				cbs = cbs->next;
				rhp->func(rhp);
				cbcount++;
			} while (cbs != NULL);
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (&crdp->cbs.head
			    == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
				call_rcu_wait(crdp);
				poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				poll(NULL, 0, 10);
			}
		} else {
			poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified.  Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL) {
		fprintf(stderr, "Out of memory.\n");
		exit(-1);
	}
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfq_init(&crdp->cbs);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
		perror("pthread_create");
		exit(-1);
	}
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;

	if (per_cpu_call_rcu_data == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return per_cpu_call_rcu_data[cpu];
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	per_cpu_call_rcu_data[cpu] = crdp;
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.  Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread.  Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure.  If there is not
 * yet a default call_rcu_data structure, one will be created.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (thread_call_rcu_data != NULL)
		return thread_call_rcu_data;

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return thread_call_rcu_data;
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.  (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	thread_call_rcu_data = crdp;
}

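#if 0	/* Usage sketch only, not compiled into the library. */
/*
 * A minimal sketch of switching the current thread to a real-time
 * call_rcu worker and back, as the comment above suggests.  The
 * function name use_rt_call_rcu_thread is hypothetical.
 */
static void use_rt_call_rcu_thread(void)
{
	struct call_rcu_data *crdp;

	/* No CPU affinity (-1); worker runs with URCU_CALL_RCU_RT set. */
	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
	set_thread_call_rcu_data(crdp);
	/* ... call_rcu() invocations from this thread use the RT worker ... */
	set_thread_call_rcu_data(NULL);
	call_rcu_data_free(crdp);
}
#endif
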
/*
 * Create a separate call_rcu thread for each CPU.  This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior.  Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by another thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}

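#if 0	/* Usage sketch only, not compiled into the library. */
/*
 * A minimal init/teardown pairing sketch, assuming hypothetical
 * application hooks app_init()/app_exit().
 */
static void app_init(void)
{
	/* Spawn one worker per CPU so the first call_rcu() is fast. */
	if (create_all_cpu_call_rcu_data(0) != 0)
		perror("create_all_cpu_call_rcu_data");
}

static void app_exit(void)
{
	free_all_cpu_call_rcu_data();
}
#endif
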
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one.  So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first.  One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfq_node_init(&head->next);
	head->func = func;
	crdp = get_call_rcu_data();
	cds_wfq_enqueue(&crdp->cbs, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread.  The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage.  For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks.  Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else.  The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers.  This simplifies
 * the calling code.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct cds_wfq_node **cbs_endprev;

	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
			poll(NULL, 0, 1);
		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
		cbs_tail = (struct cds_wfq_node **)
			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		cbs_endprev = (struct cds_wfq_node **)
			uatomic_xchg(&default_call_rcu_data->cbs.tail,
					cbs_tail);
		*cbs_endprev = cbs;
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data *crdp;

	if (maxcpus <= 0)
		return;
	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp = get_cpu_call_rcu_data(cpu);
		if (crdp == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
		call_rcu_data_free(crdp);
	}
}

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	call_rcu_lock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child.  Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec().  Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	per_cpu_call_rcu_data = NULL;
	thread_call_rcu_data = NULL;

	/* Dispose of all of the rest of the call_rcu_data structures. */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
