/*
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
#include <poll.h>
#include <sched.h>
#include <pthread.h>

#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/urcu-futex.h"
/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	struct cds_wfq_queue cbs;
	unsigned long flags;
	int32_t futex;
	pthread_mutex_t mtx;
	unsigned long qlen;		/* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */

static __thread struct call_rcu_data *thread_call_rcu_data;
/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is the default. */

static struct call_rcu_data *default_call_rcu_data;
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures,
 * and number of CPUs.
 */
static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;
static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			    NULL, NULL, 0);
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			    NULL, NULL, 0);
	}
}
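
/*
 * Note on the wait/wake handshake above: the call_rcu worker thread
 * publishes -1 in crdp->futex before blocking, and call_rcu_wake_up()
 * resets the futex to 0 before issuing FUTEX_WAKE.  Either the waker
 * observes -1 and wakes the worker, or the worker's FUTEX_WAIT returns
 * immediately because the futex no longer holds the expected value -1.
 * The cmm_smp_mb() calls order the futex accesses against the callback
 * list accesses, as the comments above note.
 */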
/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0)
		return;
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		per_cpu_call_rcu_data = p;
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
			warned = 1;
		}
	}
}
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
	return -1;
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_lock(pmp) != 0) {
		perror("pthread_mutex_lock");
		exit(-1);
	}
}
/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_unlock(pmp) != 0) {
		perror("pthread_mutex_unlock");
		exit(-1);
	}
}
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif
/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
	struct rcu_head *rhp;

	if (set_thread_cpu_affinity(crdp) != 0) {
		perror("pthread_setaffinity_np");
		exit(-1);
	}

	thread_call_rcu_data = crdp;
	if (!(crdp->flags & URCU_CALL_RCU_RT)) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
				poll(NULL, 0, 1);
			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
			cbs_tail = (struct cds_wfq_node **)
				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
			synchronize_rcu();
			cbcount = 0;
			do {
				while (cbs->next == NULL &&
				       &cbs->next != cbs_tail)
					poll(NULL, 0, 1);
				if (cbs == &crdp->cbs.dummy) {
					cbs = cbs->next;
					continue;
				}
				rhp = (struct rcu_head *)cbs;
				cbs = cbs->next;
				rhp->func(rhp);
				cbcount++;
			} while (cbs != NULL);
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (crdp->flags & URCU_CALL_RCU_STOP) {
			if (!(crdp->flags & URCU_CALL_RCU_RT)) {
				/*
				 * Read call_rcu list before write futex.
				 */
				cmm_smp_mb();
				uatomic_set(&crdp->futex, 0);
			}
			break;
		}
		if (!(crdp->flags & URCU_CALL_RCU_RT)) {
			if (&crdp->cbs.head
			    == _CMM_LOAD_SHARED(crdp->cbs.tail))
				call_rcu_wait(crdp);
		}
		poll(NULL, 0, 10);
	}
	call_rcu_lock(&crdp->mtx);
	crdp->flags |= URCU_CALL_RCU_STOPPED;
	call_rcu_unlock(&crdp->mtx);
	return NULL;
}
/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified.  Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL) {
		fprintf(stderr, "Out of memory.\n");
		exit(-1);
	}
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfq_init(&crdp->cbs);
	crdp->qlen = 0;
	if (pthread_mutex_init(&crdp->mtx, NULL) != 0) {
		perror("pthread_mutex_init");
		exit(-1);
	}
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
		perror("pthread_create");
		exit(-1);
	}
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;

	if (per_cpu_call_rcu_data == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return per_cpu_call_rcu_data[cpu];
}
/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}
/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}
/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	per_cpu_call_rcu_data[cpu] = crdp;
	return 0;
}
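
/*
 * Illustrative sketch (not part of this file): replacing CPU 0's
 * call_rcu_data structure following the protocol described above --
 * fetch the old pointer first, install the replacement, then dispose
 * of the old structure.
 *
 *	struct call_rcu_data *old_crdp, *new_crdp;
 *
 *	new_crdp = create_call_rcu_data(0, 0);
 *	old_crdp = get_cpu_call_rcu_data(0);
 *	if (set_cpu_call_rcu_data(0, new_crdp) == 0)
 *		call_rcu_data_free(old_crdp);	(freeing NULL is a no-op)
 */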
/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.  Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread.  Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure.  If there is not
 * yet a default call_rcu_data structure, one will be created.
 */

struct call_rcu_data *get_call_rcu_data(void)
{
	int curcpu;
	static int warned = 0;

	if (thread_call_rcu_data != NULL)
		return thread_call_rcu_data;
	if (maxcpus <= 0)
		return get_default_call_rcu_data();
	curcpu = sched_getcpu();
	if (!warned && (curcpu < 0 || maxcpus <= curcpu)) {
		fprintf(stderr, "[error] liburcu: gcrd CPU # out of range\n");
		warned = 1;
	}
	if (curcpu >= 0 && maxcpus > curcpu &&
	    per_cpu_call_rcu_data != NULL &&
	    per_cpu_call_rcu_data[curcpu] != NULL)
		return per_cpu_call_rcu_data[curcpu];
	return get_default_call_rcu_data();
}
/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return thread_call_rcu_data;
}
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.  (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	thread_call_rcu_data = crdp;
}
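
/*
 * Illustrative sketch (not part of this file): giving the calling
 * thread its own real-time call_rcu worker and later reverting to the
 * shared default.  URCU_CALL_RCU_RT selects the polling servicing mode
 * that avoids the futex-based wakeup.
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(crdp);
 *	... call_rcu() invocations from this thread go to crdp ...
 *	set_thread_call_rcu_data(NULL);
 *	call_rcu_data_free(crdp);
 */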
/*
 * Create a separate call_rcu thread for each CPU.  This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	long i;
	int ret;
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			/* FIXME: Leaks crdp for now. */
			return ret; /* Can happen on race. */
		}
	}
	return 0;
}
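
/*
 * Illustrative sketch (not part of this file): an application wanting
 * one call_rcu worker per CPU can request them once at startup:
 *
 *	if (create_all_cpu_call_rcu_data(0) != 0)
 *		perror("create_all_cpu_call_rcu_data");
 *
 * Subsequent call_rcu() invocations are then serviced by the worker
 * for whichever CPU the caller happens to be running on.
 */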
/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that if a call_rcu thread has not already been created, the
 * first invocation of call_rcu() will create one.  So, if you
 * need the first invocation of call_rcu() to be fast, make sure
 * to create a call_rcu thread first.  One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfq_node_init(&head->next);
	head->func = func;
	crdp = get_call_rcu_data();
	cds_wfq_enqueue(&crdp->cbs, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
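
/*
 * Illustrative sketch (not part of this file): the usual pattern is to
 * embed a struct rcu_head in the protected structure and pass call_rcu()
 * a callback that frees it after a grace period.  The "struct foo" and
 * free_foo() names below are hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 *	rcu_assign_pointer(foo_ptr, new_foo);
 *	call_rcu(&old_foo->rcu, free_foo);
 */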
/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread.  The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage.  For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks.  Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else.  The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers.  This simplifies
 * the calling code.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct cds_wfq_node **cbs_endprev;

	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0) {
		call_rcu_lock(&crdp->mtx);
		crdp->flags |= URCU_CALL_RCU_STOP;
		call_rcu_unlock(&crdp->mtx);
		wake_call_rcu_thread(crdp);
		while ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
			poll(NULL, 0, 1);
		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
		cbs_tail = (struct cds_wfq_node **)
			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
		/* Splice the orphaned callbacks onto the default queue. */
		cbs_endprev = (struct cds_wfq_node **)
			uatomic_xchg(&default_call_rcu_data->cbs.tail, cbs_tail);
		*cbs_endprev = cbs;
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
	}
	cds_list_del(&crdp->list);
	free(crdp);
}
/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data *crdp;

	if (maxcpus <= 0)
		return;
	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp = get_cpu_call_rcu_data(cpu);
		if (crdp == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
		call_rcu_data_free(crdp);
	}
}
/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	call_rcu_lock(&call_rcu_mutex);
}
/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child.  Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	call_rcu_unlock(&call_rcu_mutex);
}
/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec().  Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Dispose of all of the rest of the call_rcu_data structures. */
	while (call_rcu_data_list.next != call_rcu_data_list.prev) {
		crdp = cds_list_entry(call_rcu_data_list.prev,
				      struct call_rcu_data, list);
		if (crdp == default_call_rcu_data)
			crdp = cds_list_entry(crdp->list.prev,
					      struct call_rcu_data, list);
		crdp->flags = URCU_CALL_RCU_STOPPED;
		call_rcu_data_free(crdp);
	}
}