/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu.h"
#define _LGPL_SOURCE
/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS 10
/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100
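
/*
 * Illustrative sketch (not part of the upstream code): how the two
 * constants above interact in wait_for_readers() below. Names are
 * simplified; see the real loop for the exact locking and barriers.
 *
 *	for (;;) {
 *		if (all readers are quiescent or saw the current phase)
 *			break;
 *		if (++wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
 *			caa_cpu_relax();	(busy-wait at first)
 *		else
 *			wait_gp();		(then block on the futex)
 *		(with incoherent caches, a barrier broadcast additionally
 *		 kicks laggard readers every KICK_READER_LOOPS futex waits)
 *	}
 */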
/*
 * RCU_MEMBARRIER is only possibly available on Linux.
 */
#if defined(RCU_MEMBARRIER) && defined(__linux__)
#include <urcu/syscall-compat.h>
#endif

/* If the headers do not support SYS_membarrier, fall back on RCU_MB */
#ifdef SYS_membarrier
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

#define MEMBARRIER_EXPEDITED	(1 << 0)
#define MEMBARRIER_DELAYED	(1 << 1)
#define MEMBARRIER_QUERY	(1 << 16)
#ifdef RCU_MEMBARRIER
static int init_done;
int rcu_has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace-period wait: it
 * is sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
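
/*
 * Conceptual sketch (simplified from urcu/static/urcu.h, not part of
 * this file): rcu_gp.ctr carries a phase bit (RCU_GP_CTR_PHASE) on top
 * of a nesting-count unit (RCU_GP_COUNT). An outermost rcu_read_lock()
 * snapshots rcu_gp.ctr into the reader's TLS counter; nested read locks
 * only touch the nesting count:
 *
 *	outermost lock:	URCU_TLS(rcu_reader).ctr = rcu_gp.ctr;
 *	nested lock:	URCU_TLS(rcu_reader).ctr += RCU_GP_COUNT;
 *	unlock:		URCU_TLS(rcu_reader).ctr -= RCU_GP_COUNT;
 *
 * The writer flips RCU_GP_CTR_PHASE and then waits until no reader is
 * still nested with a snapshot of the old phase (RCU_READER_ACTIVE_OLD).
 */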
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
static CDS_LIST_HEAD(registry);
/*
 * Queue keeping threads awaiting their turn to wait for a grace period.
 * Contains struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (caa_likely(rcu_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			(void) poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}
static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&rcu_gp.futex) != -1)
		return;
	while (futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
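
/*
 * For context (sketch, assuming the wake-up path in urcu/static/urcu.h):
 * the read side pairs with the futex wait above when exiting its
 * critical section, conceptually:
 *
 *	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
 *		uatomic_set(&rcu_gp.futex, 0);
 *		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */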
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */
	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}
#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master(RCU_MB_GROUP);
				wait_gp_loops = 0;
			}
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
				wait_gp_loops++;
			} else {
				caa_cpu_relax();
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;
	/*
	 * Add ourself to gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);
	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;
	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_registry_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);
	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();
	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);
	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();
	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}
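
/*
 * Typical write-side usage (illustrative sketch, not part of this file;
 * "shared_ptr", "new_version" and "old" are hypothetical names):
 *
 *	struct foo *old = rcu_xchg_pointer(&shared_ptr, new_version);
 *	synchronize_rcu();	(all pre-existing readers have finished)
 *	free(old);		(safe: no reader can still reference "old")
 */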
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}
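
/*
 * Typical read-side usage (illustrative sketch; "shared_ptr" and "use"
 * are hypothetical): a reader thread registers once, then brackets each
 * access with rcu_read_lock()/rcu_read_unlock():
 *
 *	rcu_register_thread();
 *	...
 *	rcu_read_lock();
 *	struct foo *p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);		(must not block waiting for a grace period)
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();
 */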
void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		rcu_has_sys_membarrier = 1;
}
#endif
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes cmm_barrier() into cmm_smp_mb() on every thread it is
	 * executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}
/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}
void rcu_exit(void)
{
	/*
	 * Don't unregister the SIGRCU signal handler anymore, because
	 * call_rcu threads could still be using it shortly before the
	 * application exits.
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}
#endif /* #ifdef RCU_SIGNAL */
DEFINE_RCU_FLAVOR(rcu_flavor);
#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"