/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-qsbr.h>
#define BUILD_QSBR_LIB
#include <urcu/static/urcu-qsbr.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-qsbr.h>
#define _LGPL_SOURCE

void __attribute__((destructor)) urcu_qsbr_exit(void);

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the completion of awaiting
 * for the grace period. It is sporadically released between iterations
 * on the registry.
 *
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct urcu_gp urcu_qsbr_gp = { .ctr = URCU_QSBR_GP_ONLINE };

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_qsbr_reader, urcu_qsbr_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue keeping threads awaiting to wait for a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	cmm_smp_rmb();
	if (uatomic_read(&urcu_qsbr_gp.futex) != -1)
		return;
	while (futex_noasync(&urcu_qsbr_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
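
/*
 * Note: the wake-up side of this futex protocol lives on the reader
 * fast path, in urcu/static/urcu-qsbr.h (the grace-period wake-up
 * helper): a reader that observes its ->waiting flag set clears it,
 * resets urcu_qsbr_gp.futex to 0 and issues FUTEX_WAKE, which is what
 * terminates the FUTEX_WAIT above.
 */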

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 *
 * Moves readers from input_readers onto qsreaders as they are observed
 * quiescent. When cur_snap_readers is non-NULL, readers observed active
 * with the current urcu_qsbr_gp.ctr snapshot are parked there instead,
 * so the caller can re-check them during the second subphase.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_qsbr_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_qsbr_reader).ctr to either
	 * indicate quiescence (offline), or for them to observe the
	 * current urcu_qsbr_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_set(&urcu_qsbr_gp.futex, -1);
			/*
			 * Write futex before write waiting (the other side
			 * reads them in the opposite order).
			 */
			cmm_smp_wmb();
			cds_list_for_each_entry(index, input_readers, node) {
				_CMM_STORE_SHARED(index->waiting, 1);
			}
			/* Write futex before read reader_gp */
			cmm_smp_mb();
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_qsbr_reader_state(&index->ctr)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				cmm_smp_mb();
				uatomic_set(&urcu_qsbr_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
			} else {
#ifndef HAS_INCOHERENT_CACHES
				caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

/*
 * Using a two-subphases algorithm for architectures with smaller than 64-bit
 * long-size to ensure we do not encounter an overflow bug.
 */
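
/*
 * With a 64-bit long, urcu_qsbr_gp.ctr is simply incremented for each
 * grace period, and a reader snapshot can never alias a later counter
 * value: wrapping would require more than 2^62 grace periods. With a
 * 32-bit long, counter wrap-around is plausible within a process
 * lifetime, so an old reader snapshot could be mistaken for a current
 * one. The 32-bit implementation therefore toggles a single parity bit
 * (URCU_QSBR_GP_CTR) and waits in two subphases: first for readers
 * still holding the old parity, then for readers to observe the new
 * parity or become quiescent.
 */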

#if (CAA_BITS_PER_LONG < 64)
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to.  In the "then" case, rcu_thread_offline
	 * includes a memory barrier.
	 *
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourself to gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity
	 * before committing next urcu_qsbr_gp.ctr update to memory. Failure
	 * to do so could result in the writer waiting forever while new
	 * readers are always accessing data (no progress). Enforce
	 * compiler-order of load URCU_TLS(urcu_qsbr_reader).ctr before store
	 * to urcu_qsbr_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr ^ URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#else /* !(CAA_BITS_PER_LONG < 64) */
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourself to gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* Increment current G.P. */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr + URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new count or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#endif /* !(CAA_BITS_PER_LONG < 64) */
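
/*
 * Typical write-side usage of the grace period above (illustrative
 * sketch only, not part of this library; "struct foo", "foo_ptr" and
 * "new_value" are hypothetical caller-side names):
 *
 *	struct foo *new_foo, *old_foo;
 *
 *	new_foo = malloc(sizeof(*new_foo));
 *	new_foo->value = new_value;
 *	old_foo = foo_ptr;
 *	rcu_assign_pointer(foo_ptr, new_foo);
 *	urcu_qsbr_synchronize_rcu();
 *	free(old_foo);
 *
 * The free() is safe because urcu_qsbr_synchronize_rcu() only returns
 * once every registered reader has passed through a quiescent state.
 */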

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_qsbr_read_lock(void)
{
	_urcu_qsbr_read_lock();
}

void urcu_qsbr_read_unlock(void)
{
	_urcu_qsbr_read_unlock();
}

int urcu_qsbr_read_ongoing(void)
{
	return _urcu_qsbr_read_ongoing();
}
/* Legacy rcu_* compat symbol. */
void rcu_read_ongoing_qsbr();

void urcu_qsbr_quiescent_state(void)
{
	_urcu_qsbr_quiescent_state();
}
void rcu_quiescent_state_qsbr();

void urcu_qsbr_thread_offline(void)
{
	_urcu_qsbr_thread_offline();
}
void rcu_thread_offline_qsbr();

void urcu_qsbr_thread_online(void)
{
	_urcu_qsbr_thread_online();
}

void urcu_qsbr_register_thread(void)
{
	URCU_TLS(urcu_qsbr_reader).tid = pthread_self();
	assert(URCU_TLS(urcu_qsbr_reader).ctr == 0);

	mutex_lock(&rcu_registry_lock);
	assert(!URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 1;
	cds_list_add(&URCU_TLS(urcu_qsbr_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
	_urcu_qsbr_thread_online();
}

void urcu_qsbr_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_urcu_qsbr_thread_offline();
	assert(URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 0;
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(urcu_qsbr_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
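
/*
 * Typical reader-thread lifecycle using the registration API above
 * (illustrative sketch only, not part of this library; do_work() and
 * should_stop() are hypothetical caller-side functions):
 *
 *	#include <urcu/urcu-qsbr.h>
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_qsbr_register_thread();
 *		while (!should_stop()) {
 *			urcu_qsbr_read_lock();
 *			do_work();
 *			urcu_qsbr_read_unlock();
 *			urcu_qsbr_quiescent_state();
 *		}
 *		urcu_qsbr_unregister_thread();
 *		return NULL;
 *	}
 *
 * Each urcu_qsbr_quiescent_state() call announces a quiescent state;
 * long blocking periods should instead be bracketed by
 * urcu_qsbr_thread_offline()/urcu_qsbr_thread_online().
 */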

void urcu_qsbr_exit(void)
{
	/*
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"