/*
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#define URCU_NO_COMPAT_IDENTIFIERS

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-bp.h>
#include <urcu/static/urcu-bp.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-bp.h>
#define _LGPL_SOURCE
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC			\
	sizeof(struct registry_chunk)		\
	+ INIT_NR_THREADS * sizeof(struct urcu_bp_reader)
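/*
 * Sizing illustration (hypothetical sizes, platform-dependent): with
 * sizeof(struct registry_chunk) == 32 and
 * sizeof(struct urcu_bp_reader) == 64, ARENA_INIT_ALLOC evaluates to
 * 32 + 8 * 64 = 544 bytes: one chunk header plus 8 reader slots.
 */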
/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int urcu_bp_refcount;
/* If the headers do not support the membarrier system call, fall back to smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)		syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)		-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};
static
void __attribute__((constructor)) _urcu_bp_init(void);
static
void urcu_bp_exit(void);
static
void __attribute__((destructor)) urcu_bp_exit_destructor(void);
static void urcu_call_rcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace period wait: it
 * is sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct urcu_bp_gp urcu_bp_gp = { .ctr = URCU_BP_GP_COUNT };
URCU_ATTR_ALIAS("urcu_bp_gp") extern struct urcu_bp_gp rcu_gp_bp;
/*
 * Pointer to registry elements. Written only by each individual reader.
 * Read by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct urcu_bp_reader *, urcu_bp_reader);
DEFINE_URCU_TLS_ALIAS(struct urcu_bp_reader *, urcu_bp_reader, rcu_reader_bp);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_bp_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_bp_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_bp_reader_state(&index->ctr)) {
			case URCU_BP_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_BP_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_BP_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
void urcu_bp_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where the new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_BP_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
URCU_ATTR_ALIAS("urcu_bp_synchronize_rcu") void synchronize_rcu_bp();
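/*
 * Writer-side usage sketch (hypothetical application code; `shared_ptr`
 * and make_node() are assumptions, not part of this library). A writer
 * publishes a new version, waits for a grace period, then frees the
 * old one:
 *
 *	struct mynode *new = make_node(), *old;
 *
 *	old = rcu_xchg_pointer(&shared_ptr, new);
 *	urcu_bp_synchronize_rcu();	// wait out pre-existing readers
 *	free(old);			// no reader can still see `old`
 */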
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_bp_read_lock(void)
{
	_urcu_bp_read_lock();
}
URCU_ATTR_ALIAS("urcu_bp_read_lock") void rcu_read_lock_bp();

void urcu_bp_read_unlock(void)
{
	_urcu_bp_read_unlock();
}
URCU_ATTR_ALIAS("urcu_bp_read_unlock") void rcu_read_unlock_bp();

int urcu_bp_read_ongoing(void)
{
	return _urcu_bp_read_ongoing();
}
URCU_ATTR_ALIAS("urcu_bp_read_ongoing") int rcu_read_ongoing_bp();
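/*
 * Reader-side usage sketch (hypothetical application code; `shared_ptr`
 * and use() are assumptions). The "bulletproof" flavor registers the
 * calling thread automatically on first read-side entry:
 *
 *	struct mynode *p;
 *
 *	urcu_bp_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);		// `p` stays valid until read unlock
 *	urcu_bp_read_unlock();
 */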
/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
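/*
 * Growth illustration (hypothetical sizes): starting from an
 * ARENA_INIT_ALLOC-sized chunk of, say, 544 bytes, each expansion first
 * tries an in-place mremap_wrapper() doubling (544 -> 1088 -> 2176 ...).
 * Where remapping is unavailable (the non-Linux wrapper above returns
 * MAP_FAILED), a fresh chunk of the doubled size is mmap()'d instead,
 * so previously handed-out reader slots never move.
 */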
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(urcu_bp_reader) = rcu_reader_reg;
}
/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}
static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}
/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(urcu_bp_reader) = NULL;
}
/* Disable signals, take mutex, add to registry */
void urcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(urcu_bp_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	_urcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}
URCU_ATTR_ALIAS("urcu_bp_register") void rcu_bp_register();
void urcu_bp_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(urcu_bp_reader)))
		urcu_bp_register(); /* If not yet registered. */
}
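/*
 * Usage sketch (hypothetical application code): registration is
 * implicit in urcu_bp_read_lock(), but a thread may also register
 * eagerly, e.g. before entering a latency-sensitive loop:
 *
 *	void *worker(void *arg)
 *	{
 *		urcu_bp_register_thread();	// no-op if registered
 *		for (;;) {
 *			urcu_bp_read_lock();
 *			// ... read-side critical section ...
 *			urcu_bp_read_unlock();
 *		}
 *	}
 */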
/* Disable signals, take mutex, remove from registry */
static
void urcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	urcu_bp_exit();
}
/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	urcu_bp_unregister(rcu_key);
}
#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void urcu_bp_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	urcu_bp_sys_membarrier_status(available);
}
static
void _urcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!urcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		urcu_bp_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}
static
void urcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--urcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}
static
void urcu_bp_exit_destructor(void)
{
	urcu_call_rcu_exit();
	urcu_bp_exit();
}
/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork makes sure
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and data protected
 * by rcu_gp_lock are in a coherent state in the child.
 */
void urcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}
URCU_ATTR_ALIAS("urcu_bp_before_fork") void rcu_bp_before_fork();
void urcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
URCU_ATTR_ALIAS("urcu_bp_after_fork_parent")
void rcu_bp_after_fork_parent(void);
/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct urcu_bp_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct urcu_bp_reader *) &chunk->data[0];
				rcu_reader_reg < (struct urcu_bp_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}
void urcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
URCU_ATTR_ALIAS("urcu_bp_after_fork_child")
void rcu_bp_after_fork_child(void);
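/*
 * Usage sketch (an assumption about application wiring, not something
 * this file installs itself): the three fork hooks can be paired with
 * fork() through pthread_atfork(), so the child resumes with a
 * registry containing only itself:
 *
 *	pthread_atfork(urcu_bp_before_fork,
 *			urcu_bp_after_fork_parent,
 *			urcu_bp_after_fork_child);
 */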
void *urcu_bp_dereference_sym(void *p)
{
	return _rcu_dereference(p);
}
URCU_ATTR_ALIAS("urcu_bp_dereference_sym")
void *rcu_dereference_sym_bp();
void *urcu_bp_set_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}
URCU_ATTR_ALIAS("urcu_bp_set_pointer_sym")
void *rcu_set_pointer_sym_bp();
void *urcu_bp_xchg_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}
URCU_ATTR_ALIAS("urcu_bp_xchg_pointer_sym")
void *rcu_xchg_pointer_sym_bp();
void *urcu_bp_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
URCU_ATTR_ALIAS("urcu_bp_cmpxchg_pointer_sym")
void *rcu_cmpxchg_pointer_sym_bp();
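/*
 * Publication sketch (hypothetical `shared_ptr` and node type): the
 * cmm_wmb() in the wrappers above orders node initialization before
 * the pointer becomes visible to readers:
 *
 *	struct mynode *node = malloc(sizeof(*node));
 *
 *	node->key = key;			// initialized first...
 *	rcu_set_pointer(&shared_ptr, node);	// ...then published
 */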
DEFINE_RCU_FLAVOR(rcu_flavor);
DEFINE_RCU_FLAVOR_ALIAS(rcu_flavor, alias_rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"