// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU library, "bulletproof" version.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/annotate.h>
#include <urcu/assert.h>
#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-bp.h>
#include <urcu/static/urcu-bp.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-bp.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS		MAP_ANON
#endif
#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2
/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	urcu_posix_assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_READER_COUNT	8

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100
static
int urcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};
static
void __attribute__((constructor)) _urcu_bp_init(void);
static
void urcu_bp_exit(void);
static
void __attribute__((destructor)) urcu_bp_exit_destructor(void);
static void urcu_call_rcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the entire duration of the grace-period
 * wait: it is sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct urcu_bp_gp urcu_bp_gp = { .ctr = URCU_BP_GP_COUNT };
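
/*
 * Layout note (illustrative, assuming the definitions in
 * urcu/static/urcu-bp.h): the ctr word packs a phase bit and a nesting
 * count. The low-order bits (URCU_BP_GP_CTR_NEST_MASK) count read-side
 * critical section nesting, while URCU_BP_GP_CTR_PHASE records the
 * grace-period parity snapshotted on the outermost urcu_bp_read_lock().
 * For example, a reader two levels deep that entered while the phase
 * bit was set would hold:
 *
 *	ctr == URCU_BP_GP_CTR_PHASE + 2 * URCU_BP_GP_COUNT
 *
 * A reader is quiescent when (ctr & URCU_BP_GP_CTR_NEST_MASK) == 0.
 */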
/*
 * Pointer to registry elements. Written only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct urcu_bp_reader *, urcu_bp_reader);

static CDS_LIST_HEAD(registry);
struct registry_chunk {
	size_t capacity;		/* capacity of this chunk (in elements) */
	size_t used;			/* count of elements used */
	struct cds_list_head node;	/* chunk_list node */
	struct urcu_bp_reader readers[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
/* Get the size of a chunk's allocation from its capacity (an element count). */
static size_t chunk_allocation_size(size_t capacity)
{
	return (capacity * sizeof(struct urcu_bp_reader)) +
		sizeof(struct registry_chunk);
}
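
/*
 * For example, with a hypothetical 32-byte struct urcu_bp_reader and a
 * 24-byte chunk header, chunk_allocation_size(INIT_READER_COUNT) would
 * request 8 * 32 + 24 = 280 bytes; the flexible readers[] array simply
 * trails the header within the same mapping. Actual sizes depend on the
 * architecture and struct layout.
 */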
/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders,
			cmm_annotate_t *group)
{
	unsigned int wait_loops = 0;
	struct urcu_bp_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_bp_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_bp_reader_state(&index->ctr, group)) {
			case URCU_BP_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_BP_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_BP_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
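
/*
 * Note on the back-off above: the first RCU_QS_ACTIVE_ATTEMPTS scans of
 * the registry busy-wait (cpu_relax between scans), on the assumption
 * that readers pass through their critical sections quickly; past that
 * threshold the writer assumes a long-running or descheduled reader and
 * instead sleeps RCU_SLEEP_DELAY_MS between scans.
 */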
void urcu_bp_synchronize_rcu(void)
{
	cmm_annotate_define(acquire_group);
	cmm_annotate_define(release_group);
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();
	cmm_annotate_group_mb_release(&release_group);

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders, &acquire_group);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	cmm_annotate_group_mem_release(&release_group, &rcu_gp.ctr);
	uatomic_store(&rcu_gp.ctr, rcu_gp.ctr ^ URCU_BP_GP_CTR_PHASE, CMM_RELAXED);

	/*
	 * Must commit qparity update to memory before waiting for the other
	 * parity quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress). Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders, &acquire_group);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
	cmm_annotate_group_mb_acquire(&acquire_group);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
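
/*
 * Usage sketch (illustrative only; gp_ptr and do_something_with() are
 * hypothetical application code):
 *
 *	Reader:
 *		urcu_bp_read_lock();
 *		p = urcu_bp_dereference(gp_ptr);
 *		if (p)
 *			do_something_with(p);
 *		urcu_bp_read_unlock();
 *
 *	Updater:
 *		old = urcu_bp_xchg_pointer(&gp_ptr, new);
 *		urcu_bp_synchronize_rcu();	// wait for pre-existing readers
 *		free(old);
 *
 * The "bulletproof" flavor registers reader threads lazily on their
 * first urcu_bp_read_lock(), so no explicit registration is needed here.
 */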
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_bp_read_lock(void)
{
	_urcu_bp_read_lock();
}

void urcu_bp_read_unlock(void)
{
	_urcu_bp_read_unlock();
}

int urcu_bp_read_ongoing(void)
{
	return _urcu_bp_read_ongoing();
}
/*
 * Only grow for now. If the arena is empty, allocate a chunk sized for
 * INIT_READER_COUNT readers. Else, try expanding the last chunk. If this
 * fails, allocate a new chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_size_bytes, new_chunk_size_bytes, new_capacity;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		new_chunk_size_bytes = chunk_allocation_size(INIT_READER_COUNT);
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_size_bytes,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_size_bytes);
		new_chunk->capacity = INIT_READER_COUNT;
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_size_bytes = chunk_allocation_size(last_chunk->capacity);
	new_capacity = last_chunk->capacity << 1;
	new_chunk_size_bytes = chunk_allocation_size(new_capacity);

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_size_bytes,
		new_chunk_size_bytes, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		urcu_posix_assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_size_bytes, 0,
			new_chunk_size_bytes - old_chunk_size_bytes);
		last_chunk->capacity = new_capacity;
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_size_bytes,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_size_bytes);
	new_chunk->capacity = new_capacity;
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
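
/*
 * Illustrative growth sequence (not normative): starting empty, the
 * arena maps a chunk for 8 readers (INIT_READER_COUNT). The 9th
 * registration doubles capacity: either the existing mapping grows in
 * place to 16 slots via mremap_wrapper(), or, if the remap fails (as it
 * always does on non-Linux systems), a second 16-slot chunk is mapped
 * alongside the first. Capacities thus follow 8, 16, 32, ... while no
 * reader slot ever changes address.
 */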
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	int expand_done = 0;	/* Only allow to expand once per alloc */

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		size_t spot_idx;

		/* Skip fully used chunks. */
		if (chunk->used == chunk->capacity) {
			continue;
		}

		/* Find a free spot. */
		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			if (!chunk->readers[spot_idx].alloc) {
				chunk->readers[spot_idx].alloc = 1;
				chunk->used++;
				return &chunk->readers[spot_idx];
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	urcu_posix_assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(urcu_bp_reader) = rcu_reader_reg;
}
/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used--;
}
static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct urcu_bp_reader *) &chunk->readers[0])
			continue;
		if (rcu_reader_reg >= (struct urcu_bp_reader *) &chunk->readers[chunk->capacity])
			continue;
		return chunk;
	}
	return NULL;
}
/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(urcu_bp_reader) = NULL;
}
/* Disable signals, take mutex, add to registry */
void urcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(urcu_bp_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	_urcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}
void urcu_bp_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(urcu_bp_reader)))
		urcu_bp_register(); /* If not yet registered. */
}
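
/*
 * Illustrative usage (hypothetical application code): threads are
 * normally registered implicitly by their first urcu_bp_read_lock(),
 * but a thread may pre-register to move the registration cost (mmap,
 * signal masking) out of its fast path:
 *
 *	static void *worker(void *arg)
 *	{
 *		urcu_bp_register_thread();	// optional warm-up
 *		for (;;) {
 *			urcu_bp_read_lock();
 *			// ... read-side work ...
 *			urcu_bp_read_unlock();
 *		}
 *	}
 */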
/* Disable signals, take mutex, remove from registry */
static
void urcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	urcu_bp_exit();
}
/*
 * Remove the thread from the registry when it exits, and flag its slot
 * as free so it can be reused.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	urcu_bp_unregister(rcu_key);
}
#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif
static
void urcu_bp_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	urcu_bp_sys_membarrier_status(available);
}
static
void _urcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!urcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		urcu_bp_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}
static
void urcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--urcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk,
				chunk_allocation_size(chunk->capacity));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}
static
void urcu_bp_exit_destructor(void)
{
	urcu_call_rcu_exit();
	urcu_bp_exit();
}
/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork ensures that
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and data protected by
 * rcu_gp_lock are in a coherent state in the child.
 */
void urcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}
void urcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
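
/*
 * Illustrative wiring (hypothetical application code): a program that
 * forks without exec() while other threads use RCU could install these
 * handlers once at startup:
 *
 *	pthread_atfork(urcu_bp_before_fork,
 *		urcu_bp_after_fork_parent,
 *		urcu_bp_after_fork_child);
 */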
/*
 * Prune all entries from registry except our own thread. Matches Linux
 * fork() behavior, where only the forking thread survives in the child.
 * Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		size_t spot_idx;

		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			struct urcu_bp_reader *reader = &chunk->readers[spot_idx];

			if (!reader->alloc)
				continue;
			if (reader->tid == pthread_self())
				continue;
			cleanup_thread(chunk, reader);
		}
	}
}
void urcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}
void *urcu_bp_dereference_sym(void *p)
{
	return _rcu_dereference(p);
}

void *urcu_bp_set_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *urcu_bp_xchg_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *urcu_bp_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
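
/*
 * Illustrative publish pattern (hypothetical names): an updater fills in
 * a structure completely before making it reachable, relying on the
 * write barrier in the pointer operations above:
 *
 *	struct foo *new = malloc(sizeof(*new));
 *	new->a = ...;
 *	new->b = ...;
 *	old = urcu_bp_xchg_pointer(&gp_ptr, new);	// publishes "new"
 *	urcu_bp_synchronize_rcu();
 *	free(old);
 */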
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"