/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

static int init_done;

void __attribute__((constructor)) urcu_init(void);
void __attribute__((destructor)) urcu_exit(void);

static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written only by the writer with the mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;
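
/*
 * Illustrative sketch (not part of this file): the constants above are
 * defined in urcu-static.h. Assuming RCU_GP_COUNT is the low-order nesting
 * increment and RCU_GP_CTR_BIT is a high-order parity bit, a reader fast
 * path can record both the current parity and an initial nesting count of 1
 * with a single snapshot of urcu_gp_ctr, roughly:
 *
 *	long tmp = urcu_reader.ctr;
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		urcu_reader.ctr = urcu_gp_ctr;		// outermost read-side C.S.
 *	else
 *		urcu_reader.ctr = tmp + RCU_GP_COUNT;	// nested: bump count only
 *
 * RCU_GP_CTR_NEST_MASK is assumed here for illustration; see urcu-static.h
 * for the actual reader implementation.
 */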
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct urcu_reader __thread urcu_reader;
unsigned int yield_active;
unsigned int __thread rand_yield;
static LIST_HEAD(registry);
static void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (urcu_reader.need_mb) {
			smp_mb();
			urcu_reader.need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
#ifdef URCU_MB
static void force_mb_single_thread(struct urcu_reader *index)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef URCU_MB */
static void force_mb_single_thread(struct urcu_reader *index)
{
	assert(!list_empty(&registry));
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
static void force_mb_all_threads(void)
{
	struct urcu_reader *index;

	/*
	 * Ask each of the threads to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (list_empty(&registry))
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	list_for_each_entry(index, &registry, head) {
		index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() below will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	list_for_each_entry(index, &registry, head) {
		while (index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef URCU_MB */
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(struct urcu_reader *index)
{
	uatomic_dec(&gp_futex);
	force_mb_single_thread(index); /* Write futex before read reader_gp */
	if (!rcu_old_gp_ongoing(&index->ctr)) {
		/* Read reader_gp before write futex */
		force_mb_single_thread(index);
		/* Callbacks are queued, don't wait. */
		uatomic_set(&gp_futex, 0);
	} else {
		/* Read reader_gp before read futex */
		force_mb_single_thread(index);
		if (uatomic_read(&gp_futex) == -1)
			futex(&gp_futex, FUTEX_WAIT, -1,
			      NULL, NULL, 0);
	}
}
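
/*
 * Hedged note: wait_gp() blocks on gp_futex only after publishing -1, so the
 * matching wake-up is expected on the read-side unlock path (in
 * urcu-static.h), along the lines of:
 *
 *	if (uatomic_read(&gp_futex) == -1) {
 *		uatomic_set(&gp_futex, 0);
 *		futex(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 *
 * This sketch is illustrative only; the actual wake-up code lives outside
 * this file.
 */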
void wait_for_quiescent_state(void)
{
	struct urcu_reader *index;

	if (list_empty(&registry))
		return;
	/*
	 * Wait for each thread urcu_reader.ctr count to become 0.
	 */
	list_for_each_entry(index, &registry, head) {
		int wait_loops = 0;
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_old_gp_ongoing(&index->ctr)) {
			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp(index);
			} else {
				cpu_relax();
			}
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_reader.ctr update to memory if we wait for too long.
		 */
		while (rcu_old_gp_ongoing(&index->ctr)) {
			switch (wait_loops++) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp(index);
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				force_mb_single_thread(index);
				wait_loops = 0;
				break; /* only escape switch */
			default:
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result in
	 * the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates on
	 * reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}
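
/*
 * Hedged usage sketch (not part of the library): a writer typically publishes
 * a new version of a structure with rcu_assign_pointer(), waits for a grace
 * period with synchronize_rcu(), then frees the old version. The struct foo
 * type, gbl_foo pointer and update_foo() function below are hypothetical.
 *
 *	struct foo { int a; };
 *	struct foo *gbl_foo;
 *
 *	void update_foo(int a)
 *	{
 *		struct foo *new_fp, *old_fp;
 *
 *		new_fp = malloc(sizeof(*new_fp));
 *		new_fp->a = a;
 *		old_fp = gbl_foo;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		synchronize_rcu();	// wait for pre-existing readers
 *		free(old_fp);		// no reader can still hold old_fp
 *	}
 */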
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}
void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(p, v);
}
void *rcu_xchg_pointer_sym(void **p, void *v)
{
	wmb();
	return uatomic_xchg(p, v);
}
void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	wmb();
	return uatomic_cmpxchg(p, old, _new);
}
void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	wmb();
	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}
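
/*
 * Hedged usage sketch: assuming urcu.h maps rcu_publish_content() onto this
 * symbol, the exchange and the grace period are combined, so the returned
 * pointer can be reclaimed immediately. gbl_foo and new_fp are hypothetical,
 * as in the writer sketch above:
 *
 *	struct foo *old_fp = rcu_publish_content(&gbl_foo, new_fp);
 *	free(old_fp);	// synchronize_rcu() already ran inside the call
 */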
void rcu_register_thread(void)
{
	urcu_reader.tid = pthread_self();
	assert(urcu_reader.need_mb == 0);
	assert(urcu_reader.ctr == 0);

	internal_urcu_lock();
	urcu_init();	/* In case gcc does not support constructor attribute */
	list_add(&urcu_reader.head, &registry);
	internal_urcu_unlock();
}
void rcu_unregister_thread(void)
{
	internal_urcu_lock();
	list_del(&urcu_reader.head);
	internal_urcu_unlock();
}
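
/*
 * Hedged usage sketch (hypothetical reader thread, not part of the library):
 * every reader thread registers before its first read-side critical section
 * and unregisters before exiting. stop_flag and do_something_with() are made
 * up for the example.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		struct foo *fp;
 *
 *		rcu_register_thread();
 *		while (!stop_flag) {
 *			rcu_read_lock();
 *			fp = rcu_dereference(gbl_foo);
 *			if (fp)
 *				do_something_with(fp->a);
 *			rcu_read_unlock();
 *		}
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */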
#ifndef URCU_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	urcu_reader.need_mb = 0;
	smp_mb();
}
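
/*
 * Illustration of the handshake this handler completes (assumed interleaving,
 * matching force_mb_single_thread()/force_mb_all_threads() above):
 *
 *	writer					reader
 *	------					------
 *	index->need_mb = 1;
 *	smp_mc();
 *	pthread_kill(index->tid, SIGURCU);
 *						sigurcu_handler():
 *						  smp_mb();
 *						  urcu_reader.need_mb = 0;
 *						  smp_mb();
 *	while (index->need_mb)
 *		poll(NULL, 0, 1);		// eventually observes need_mb == 0
 *	smp_mb();
 */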
/*
 * urcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the internal_urcu_lock() from rcu_register_thread() or by running at
 * library load time, which should not be executed by multiple threads nor
 * concurrently with rcu_register_thread() anyway.
 */
void urcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}
void urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	assert(list_empty(&registry));
}
#endif /* #ifndef URCU_MB */