/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#define _DEFAULT_SOURCE
#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu/arch.h"
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu.h"
#define _LGPL_SOURCE
/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per-se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS	10

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100
/*
 * RCU_MEMBARRIER is only possibly available on Linux.
 */
#if defined(RCU_MEMBARRIER) && defined(__linux__)
#include <urcu/syscall-compat.h>
#endif

/* If the headers do not support SYS_membarrier, fall back on RCU_MB */
#ifdef SYS_membarrier
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
};
#ifdef RCU_MEMBARRIER
static int init_done;
int rcu_has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held continuously while awaiting completion of the
 * grace period: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
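
/*
 * Illustrative lock-ordering sketch (not part of the build): given the
 * nesting rule above, a writer takes the locks in this order, as
 * synchronize_rcu() does below:
 *
 *	mutex_lock(&rcu_gp_lock);
 *	mutex_lock(&rcu_registry_lock);
 *	...	(iterate on registry; rcu_registry_lock may be released
 *		 and re-acquired between iterations)
 *	mutex_unlock(&rcu_registry_lock);
 *	mutex_unlock(&rcu_gp_lock);
 */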
struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
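
/*
 * Counter-layout sketch (constants defined in urcu/static/urcu.h):
 * rcu_gp.ctr always holds RCU_GP_COUNT (nesting value 1) plus the
 * current RCU_GP_CTR_PHASE bit, which is toggled by each grace period.
 * Ignoring memory barriers, the read-side operations reduce to:
 *
 *	outermost rcu_read_lock():  URCU_TLS(rcu_reader).ctr = rcu_gp.ctr;
 *	nested rcu_read_lock():     URCU_TLS(rcu_reader).ctr += RCU_GP_COUNT;
 *	rcu_read_unlock():          URCU_TLS(rcu_reader).ctr -= RCU_GP_COUNT;
 *
 * A reader is quiescent when its RCU_GP_CTR_NEST_MASK bits are zero,
 * and "current" when its phase bit matches rcu_gp.ctr.
 */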
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
static CDS_LIST_HEAD(registry);
/*
 * Queue keeping threads awaiting to wait for a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (caa_likely(rcu_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner.  However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report.  For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			(void) poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */
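
/*
 * Pairing sketch for the signal-based barrier above, assuming one
 * registered reader: the read-side fast path only issues compiler
 * barriers, which the writer promotes to real memory barriers via
 * SIGRCU delivery.
 *
 *	writer					reader (sigrcu_handler)
 *	------					-----------------------
 *	CMM_STORE_SHARED(index->need_mb, 1);
 *	pthread_kill(index->tid, SIGRCU);
 *	while (CMM_LOAD_SHARED(index->need_mb))	cmm_smp_mb();
 *		poll(NULL, 0, 1);		need_mb = 0;
 *	cmm_smp_mb();				cmm_smp_mb();
 */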
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&rcu_gp.futex) != -1)
		return;
	while (futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
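
/*
 * Futex protocol sketch: wait_for_readers() below decrements
 * rcu_gp.futex to -1 before blocking here; the read-side unlock path
 * (wake_up_gp() in urcu/static/urcu.h) notices the -1, resets the futex
 * to 0 and issues a FUTEX_WAKE, which terminates the FUTEX_WAIT above.
 * EWOULDBLOCK simply means the reset happened before we went to sleep.
 */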
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master(RCU_MB_GROUP);
				wait_gp_loops = 0;
			}
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
				wait_gp_loops++;
			} else {
				caa_cpu_relax();
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
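
/*
 * Classification sketch matching rcu_reader_state() (defined in
 * urcu/static/urcu.h), for a reader counter snapshot v:
 *
 *	if (!(v & RCU_GP_CTR_NEST_MASK))
 *		-> RCU_READER_INACTIVE (quiescent, moves to qsreaders)
 *	else if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
 *		-> RCU_READER_ACTIVE_CURRENT (moves to cur_snap_readers)
 *	else
 *		-> RCU_READER_ACTIVE_OLD (stays in input_readers, keep waiting)
 */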
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourself to gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_registry_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress).  Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
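
/*
 * Read-side usage sketch for the wrappers above (`gp` and
 * do_something() are hypothetical): critical sections may nest and
 * never block on the grace period.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p->a);
 *	rcu_read_unlock();
 */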
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}
void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
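
/*
 * Thread lifecycle sketch: each thread running read-side critical
 * sections with this flavor must register before its first
 * rcu_read_lock() and unregister before exiting (`stop_requested` is a
 * hypothetical application flag):
 *
 *	void *reader_thread(void *arg)
 *	{
 *		rcu_register_thread();
 *		while (!stop_requested) {
 *			rcu_read_lock();
 *			...	(read shared data)
 *			rcu_read_unlock();
 *		}
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */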
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	int ret;

	if (init_done)
		return;
	init_done = 1;
	ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED)) {
		rcu_has_sys_membarrier = 1;
	}
}
#endif
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. It punctually promotes cmm_barrier() into cmm_smp_mb()
	 * on every thread it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}
/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}
void rcu_exit(void)
{
	/*
	 * Don't unregister the SIGRCU signal handler anymore, because
	 * call_rcu threads could still be using it shortly before the
	 * application exits.
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}
#endif /* #ifdef RCU_SIGNAL */
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"