/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#define _LGPL_SOURCE
#define _DEFAULT_SOURCE

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu.h"
#define _LGPL_SOURCE
/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS 10
/*
 * Active attempts to check for reader quiescent state before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100
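/*
 * The grace-period waiter first polls reader state actively for
 * RCU_QS_ACTIVE_ATTEMPTS iterations, and only then falls back to
 * sleeping on rcu_gp.futex (see wait_for_readers() and wait_gp() below).
 */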
/*
 * RCU_MEMBARRIER is only possibly available on Linux.
 */
#if defined(RCU_MEMBARRIER) && defined(__linux__)
#include <urcu/syscall-compat.h>
#endif

/* If the headers do not support SYS_membarrier, fall back on RCU_MB */
#ifdef SYS_membarrier
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

#define MEMBARRIER_EXPEDITED	(1 << 0)
#define MEMBARRIER_DELAYED	(1 << 1)
#define MEMBARRIER_QUERY	(1 << 16)
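/*
 * Note: the flag values above follow the membarrier system call
 * interface this file was written against; they differ from the
 * MEMBARRIER_CMD_* constants of the membarrier ABI eventually merged
 * into the mainline Linux kernel.
 */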
#ifdef RCU_MEMBARRIER
static int init_done;
int rcu_has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif
#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif
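/*
 * This file builds into one of three flavors, selected at compile time:
 * RCU_MEMBARRIER (uses sys_membarrier() so readers need only compiler
 * barriers), RCU_MB (unconditional memory barriers on the read side),
 * or RCU_SIGNAL (promotes read-side compiler barriers to memory
 * barriers by signalling reader threads with SIGRCU).
 */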
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held continuously while awaiting grace-period
 * completion: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
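/*
 * rcu_gp.ctr is the global counter readers snapshot on their outermost
 * rcu_read_lock(): it carries a parity bit (RCU_GP_CTR_PHASE) that
 * synchronize_rcu() flips between grace-period halves, seeded with a
 * nonzero nesting count (RCU_GP_COUNT) so active readers are
 * distinguishable from quiescent ones. rcu_gp.futex lets the
 * grace-period waiter sleep when readers are slow to quiesce.
 */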
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
static CDS_LIST_HEAD(registry);
/*
 * Queue keeping threads that are awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
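/*
 * Grace-period requests are batched: the first thread to enqueue itself
 * on gp_waiters becomes the grace-period leader and performs a single
 * grace period on behalf of every thread queued behind it; the others
 * simply sleep until the leader wakes them (see synchronize_rcu()).
 */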
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			/* Service our own need_mb request while spinning. */
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (caa_likely(rcu_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif
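/*
 * smp_mb_master() pairs with the read-side barriers: with
 * RCU_MEMBARRIER, the expedited membarrier() turns the readers' mere
 * compiler barriers into system-wide memory barriers, so the read-side
 * fast path pays no fence cost; with RCU_MB, both sides issue real
 * memory barriers.
 */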
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb(), so that we can consider
	 * the compiler barriers around the rcu read lock as real memory
	 * barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() below will never be executed on
	 * systems that correctly deliver signals in a timely manner.
	 * However, it is not uncommon for kernels to have bugs that can
	 * result in lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			(void) poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */
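/*
 * With the RCU_SIGNAL flavor, smp_mb_master() achieves the same pairing
 * by signalling every registered reader with SIGRCU and waiting until
 * each signal handler has executed its cmm_smp_mb(), thereby promoting
 * the readers' compiler barriers into memory barriers on demand.
 */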
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&rcu_gp.futex) == -1)
		futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */
	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}
#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master(RCU_MB_GROUP);
				wait_gp_loops = 0;
			}
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
				wait_gp_loops++;
			} else {
				caa_cpu_relax();
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
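/*
 * Futex hand-off sketch (the wake-up side lives in urcu/static/urcu.h):
 * after RCU_QS_ACTIVE_ATTEMPTS active polls, the waiter decrements
 * rcu_gp.futex to -1 and blocks in wait_gp(); a reader whose outermost
 * rcu_read_unlock() observes the -1 resets the futex to 0 and issues a
 * futex wake-up, so the grace period makes progress without spinning.
 */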
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourself to the gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);
	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_registry_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);
	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);
	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}
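/*
 * Illustrative write-side usage (a sketch only; "global_ptr",
 * "make_node()" and struct node are hypothetical, not part of this
 * library):
 *
 *	struct node *old, *new = make_node();
 *
 *	old = global_ptr;
 *	rcu_assign_pointer(global_ptr, new);	// publish new version
 *	synchronize_rcu();	// wait out all pre-existing readers
 *	free(old);	// no reader can still reference the old version
 */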
/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
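/*
 * Illustrative read-side usage of the wrappers above (a sketch only;
 * "global_ptr" and "use_node()" are hypothetical):
 *
 *	struct node *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_ptr);
 *	if (p)
 *		use_node(p);
 *	rcu_read_unlock();
 *
 * The calling thread must have called rcu_register_thread() beforehand.
 */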
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}
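/*
 * Every thread intending to run rcu_read_lock() with this flavor must
 * register itself first: unregistered threads are invisible to
 * wait_for_readers(), so their read-side critical sections would not be
 * waited for.
 */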
void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		rcu_has_sys_membarrier = 1;
}
#endif
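/*
 * The MEMBARRIER_QUERY call above probes, at initialization time,
 * whether the kernel supports expedited membarrier. When it does,
 * rcu_has_sys_membarrier is set and the read side (see
 * urcu/static/urcu.h) can rely on compiler-only barriers, with
 * smp_mb_master() issuing the expedited membarrier on its behalf.
 */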
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. It punctually promotes cmm_barrier() into cmm_smp_mb()
	 * on every thread it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}
/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}
void rcu_exit(void)
{
	/*
	 * Don't unregister the SIGRCU signal handler anymore, because
	 * call_rcu threads could still be using it shortly before the
	 * application exits.
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}
#endif /* #ifdef RCU_SIGNAL */
DEFINE_RCU_FLAVOR(rcu_flavor);
#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"