// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU library, "bulletproof" version.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/assert.h>
#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-bp.h>
#include <urcu/static/urcu-bp.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-utils.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-bp.h>
#define _LGPL_SOURCE
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS		MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap_wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	urcu_posix_assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct urcu_bp_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100
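
/*
 * Illustrative note (not part of the original file): with the values
 * above, a writer waiting for readers in wait_for_readers() actively
 * re-checks the reader registry up to RCU_QS_ACTIVE_ATTEMPTS (100)
 * times, then backs off to sleeping RCU_SLEEP_DELAY_MS (10 ms) between
 * subsequent checks:
 *
 *	if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
 *		(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
 */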
static
int urcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};
static
void __attribute__((constructor)) _urcu_bp_init(void);
static
void urcu_bp_exit(void);
static
void __attribute__((destructor)) urcu_bp_exit_destructor(void);
static void urcu_call_rcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the whole duration of the grace-period
 * wait: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
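
/*
 * Illustrative sketch (not part of the original file): the only legal
 * acquisition order for the two write-side locks, following the
 * nesting rule stated above.
 *
 *	mutex_lock(&rcu_gp_lock);
 *	mutex_lock(&rcu_registry_lock);
 *	...registry walk or update...
 *	mutex_unlock(&rcu_registry_lock);
 *	mutex_unlock(&rcu_gp_lock);
 */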
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct urcu_bp_gp urcu_bp_gp = { .ctr = URCU_BP_GP_COUNT };
/*
 * Pointer to registry elements. Written to only by each individual
 * reader. Read by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct urcu_bp_reader *, urcu_bp_reader);

static CDS_LIST_HEAD(registry);
struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];			/* reader registry storage */
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};
/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_bp_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_bp_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_bp_reader_state(&index->ctr)) {
			case URCU_BP_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_BP_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_BP_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
void urcu_bp_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Write the new ptr before
	 * changing the qparity.
	 */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_BP_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for the other
	 * parity quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress). Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
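
/*
 * Illustrative write-side sketch (not part of the original file): a
 * writer typically unpublishes a node, waits for a grace period, then
 * reclaims it. "head", "node" and remove_node() are hypothetical.
 *
 *	remove_node(head, node);	// unlink from reader-visible structure
 *	urcu_bp_synchronize_rcu();	// wait out all pre-existing readers
 *	free(node);			// no reader can still hold a reference
 */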
/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_bp_read_lock(void)
{
	_urcu_bp_read_lock();
}

void urcu_bp_read_unlock(void)
{
	_urcu_bp_read_unlock();
}

int urcu_bp_read_ongoing(void)
{
	return _urcu_bp_read_ongoing();
}
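
/*
 * Illustrative read-side sketch (not part of the original file): the
 * "bulletproof" flavor registers the calling thread implicitly on
 * first use, so a reader only needs the lock/unlock pair. "gp_ptr" and
 * use() are hypothetical; rcu_dereference() comes from <urcu/pointer.h>.
 *
 *	urcu_bp_read_lock();
 *	p = rcu_dereference(gp_ptr);
 *	if (p)
 *		use(p);
 *	urcu_bp_read_unlock();
 */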
/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all its "used" slots are released, but we don't do it at
 * this point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk yet: allocate the first one. */
	if (cds_list_empty(&arena->chunk_list)) {
		urcu_posix_assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding the last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow the memory mapping to move, just expand it. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		urcu_posix_assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
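
/*
 * Illustrative arithmetic (not part of the original file): with
 * INIT_NR_THREADS == 8, the first chunk provides 8 reader slots. Each
 * expansion doubles the whole mapping (header included), so capacity
 * grows roughly as 8, 16, 32, ... slots, while already-allocated slot
 * addresses never move.
 */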
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find a free spot within this chunk. */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	urcu_posix_assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(urcu_bp_reader) = rcu_reader_reg;
}
/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}
static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}
/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(urcu_bp_reader) = NULL;
}
/* Disable signals, take mutex, add to registry */
void urcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(urcu_bp_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	_urcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}
void urcu_bp_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(urcu_bp_reader)))
		urcu_bp_register(); /* If not yet registered. */
}
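
/*
 * Illustrative sketch (not part of the original file): threads are
 * registered lazily by the first read-side critical section, but a
 * thread may register up front to keep that one-time cost out of its
 * fast path. "worker" is hypothetical.
 *
 *	static void *worker(void *arg)
 *	{
 *		urcu_bp_register_thread();
 *		// ... read-side critical sections ...
 *		return NULL;
 *	}
 */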
/* Disable signals, take mutex, remove from registry */
static
void urcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	urcu_bp_exit();
}
/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	urcu_bp_unregister(rcu_key);
}
#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void urcu_bp_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	urcu_bp_sys_membarrier_status(available);
}
static
void _urcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!urcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		urcu_bp_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}
static
void urcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--urcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}
static
void urcu_bp_exit_destructor(void)
{
	urcu_call_rcu_exit();
	urcu_bp_exit();
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork makes sure
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and the data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void urcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}
void urcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct urcu_bp_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct urcu_bp_reader *) &chunk->data[0];
				rcu_reader_reg < (struct urcu_bp_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}
void urcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
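
/*
 * Illustrative sketch (not part of the original file): an application
 * can wire the three fork handlers with pthread_atfork() so that every
 * fork() transits through them.
 *
 *	pthread_atfork(urcu_bp_before_fork,
 *		urcu_bp_after_fork_parent,
 *		urcu_bp_after_fork_child);
 */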
void *urcu_bp_dereference_sym(void *p)
{
	return _rcu_dereference(p);
}

void *urcu_bp_set_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *urcu_bp_xchg_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *urcu_bp_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
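
/*
 * Illustrative publication sketch (not part of the original file):
 * these _sym wrappers back the rcu_set_pointer()/rcu_xchg_pointer()
 * macros for non-LGPL code. The cmm_wmb() orders initialization of the
 * pointed-to data before publication. "gp_ptr" and "node" are
 * hypothetical.
 *
 *	node->value = 42;			// initialize payload first
 *	rcu_set_pointer(&gp_ptr, node);		// then publish to readers
 */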
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"