/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct rcu_reader __thread rcu_reader;
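/*
 * For reference, the reader fast path in urcu-static.h conceptually
 * snapshots rcu_gp_ctr into rcu_reader.ctr on the outermost nesting
 * level, roughly (simplified sketch, omitting the surrounding barriers):
 *
 *	tmp = rcu_reader.ctr;
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
 *	else
 *		_CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
 *
 * So an active reader carries the RCU_GP_CTR_PHASE bit of the phase it
 * started in, plus a non-zero nesting count.
 */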
#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static CDS_LIST_HEAD(registry);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(rcu_reader.need_mb, 0);
			cmm_smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (likely(has_sys_membarrier))
		membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner.  However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report.  For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&gp_futex) == -1)
		futex_async(&gp_futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}
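/*
 * The read-side counterpart lives in urcu-static.h: on the outermost
 * rcu_read_unlock(), a reader that observes gp_futex == -1 wakes the
 * writer, along the lines of (illustrative sketch):
 *
 *	if (unlikely(uatomic_read(&gp_futex) == -1)) {
 *		uatomic_set(&gp_futex, 0);
 *		futex_async(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */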
void update_counter_and_wait(void)
{
	CDS_LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp_ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for each thread's rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
			if (!rcu_gp_ongoing(&index->ctr))
				cds_list_move(&index->node, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * rcu_reader.ctr update to memory if we wait for too long.
		 */
		if (cds_list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			switch (wait_loops) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp();
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				smp_mb_master(RCU_MB_GROUP);
				wait_loops = 0;
				break; /* only escape switch */
			default:
				caa_cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* put back the reader list in the registry */
	cds_list_splice(&qsreaders, &registry);
}
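/*
 * For context, rcu_gp_ongoing() (defined in urcu-static.h) reports whether
 * a reader still references the previous grace period phase, conceptually
 * (simplified sketch):
 *
 *	v = CMM_LOAD_SHARED(*ctr);
 *	return (v & RCU_GP_CTR_NEST_MASK) &&
 *		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 *
 * i.e. the reader is inside a read-side critical section (non-zero nesting
 * count) and its counter snapshot was taken in the other parity phase.
 */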
void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where the new ptr points to. Must be done within rcu_gp_lock because
	 * it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing next rcu_gp_ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * rcu_reader ctr before store to rcu_gp_ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within rcu_gp_lock because it iterates on reader
	 * threads. */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_gp_lock);
}
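/*
 * Typical writer-side usage (illustrative sketch; shared_ptr, old, new and
 * init_foo() are hypothetical application names, not part of this library):
 *
 *	new = malloc(sizeof(*new));
 *	init_foo(new);
 *	old = rcu_xchg_pointer(&shared_ptr, new);
 *	synchronize_rcu();
 *	free(old);
 *
 * synchronize_rcu() returns once every reader that could have seen the old
 * pointer has left its read-side critical section, so freeing old is safe.
 */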
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}
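/*
 * Typical reader-side usage (illustrative sketch; shared_ptr and
 * do_something_with() are hypothetical, not part of this library):
 *
 *	rcu_register_thread();		once per reader thread
 *	...
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();	before the thread exits
 */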
void rcu_register_thread(void)
{
	rcu_reader.tid = pthread_self();
	assert(rcu_reader.need_mb == 0);
	assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_gp_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&rcu_reader.node, &registry);
	mutex_unlock(&rcu_gp_lock);
}
void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_gp_lock);
	cds_list_del(&rcu_reader.node);
	mutex_unlock(&rcu_gp_lock);
}
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		has_sys_membarrier = 1;
}
#endif
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. It punctually promotes cmm_barrier() into cmm_smp_mb() on
	 * every thread it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(rcu_reader.need_mb, 0);
	cmm_smp_mb();
}
/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_gp_lock from rcu_register_thread() or by running at library
 * load time, which should not be executed by multiple threads nor concurrently
 * with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}
void rcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGRCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigrcu_handler);
	assert(cds_list_empty(&registry));
}
#endif /* #ifdef RCU_SIGNAL */

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"