/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"
/*
 * Run urcu_init()/urcu_exit() automatically at library load/unload.
 * urcu_init() is also called explicitly from rcu_register_thread() in case
 * the toolchain does not honor the constructor attribute.
 */
void __attribute__((constructor)) urcu_init(void);
void __attribute__((destructor)) urcu_exit(void);
44 pthread_mutex_t urcu_mutex
= PTHREAD_MUTEX_INITIALIZER
;
47 * Global grace period counter.
52 * Written to only by each individual reader. Read by both the reader and the
55 long __thread urcu_active_readers
;
57 /* Thread IDs of registered readers */
58 #define INIT_NUM_THREADS 4
60 struct reader_registry
{
62 long *urcu_active_readers
;
67 unsigned int yield_active
;
68 unsigned int __thread rand_yield
;
71 static struct reader_registry
*registry
;
72 static char __thread need_mb
;
73 static int num_readers
, alloc_readers
;
75 void internal_urcu_lock(void)
79 #ifndef DISTRUST_SIGNALS_EXTREME
80 ret
= pthread_mutex_lock(&urcu_mutex
);
82 perror("Error in pthread mutex lock");
85 #else /* #ifndef DISTRUST_SIGNALS_EXTREME */
86 while ((ret
= pthread_mutex_trylock(&urcu_mutex
)) != 0) {
87 if (ret
!= EBUSY
&& ret
!= EINTR
) {
88 printf("ret = %d, errno = %d\n", ret
, errno
);
89 perror("Error in pthread mutex lock");
99 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
102 void internal_urcu_unlock(void)
106 ret
= pthread_mutex_unlock(&urcu_mutex
);
108 perror("Error in pthread mutex unlock");
114 #ifdef HAS_INCOHERENT_CACHES
115 static void force_mb_single_thread(struct reader_registry
*index
)
119 #endif /* #ifdef HAS_INCOHERENT_CACHES */
121 static void force_mb_all_threads(void)
125 #else /* #ifdef DEBUG_FULL_MB */
#ifdef HAS_INCOHERENT_CACHES
/*
 * Force one reader thread to execute a memory barrier, by signalling it
 * and waiting for its sighandler to acknowledge (clear *need_mb).
 * Caller must hold urcu_mutex (iterating a stable registry entry).
 */
static void force_mb_single_thread(struct reader_registry *index)
{
	assert(registry);
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything : we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	*index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signals */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (*index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */
151 static void force_mb_all_threads(void)
153 struct reader_registry
*index
;
155 * Ask for each threads to execute a smp_mb() so we can consider the
156 * compiler barriers around rcu read lock as real memory barriers.
161 * pthread_kill has a smp_mb(). But beware, we assume it performs
162 * a cache flush on architectures with non-coherent cache. Let's play
163 * safe and don't assume anything : we use smp_mc() to make sure the
164 * cache flush is enforced.
166 for (index
= registry
; index
< registry
+ num_readers
; index
++) {
168 smp_mc(); /* write need_mb before sending the signal */
169 pthread_kill(index
->tid
, SIGURCU
);
172 * Wait for sighandler (and thus mb()) to execute on every thread.
174 * Note that the pthread_kill() will never be executed on systems
175 * that correctly deliver signals in a timely manner. However, it
176 * is not uncommon for kernels to have bugs that can result in
177 * lost or unduly delayed signals.
179 * If you are seeing the below pthread_kill() executing much at
180 * all, we suggest testing the underlying kernel and filing the
181 * relevant bug report. For Linux kernels, we recommend getting
182 * the Linux Test Project (LTP).
184 for (index
= registry
; index
< registry
+ num_readers
; index
++) {
185 while (*index
->need_mb
) {
186 pthread_kill(index
->tid
, SIGURCU
);
190 smp_mb(); /* read ->need_mb before ending the barrier */
192 #endif /* #else #ifdef DEBUG_FULL_MB */
194 void wait_for_quiescent_state(void)
196 struct reader_registry
*index
;
201 * Wait for each thread urcu_active_readers count to become 0.
203 for (index
= registry
; index
< registry
+ num_readers
; index
++) {
204 #ifndef HAS_INCOHERENT_CACHES
205 while (rcu_old_gp_ongoing(index
->urcu_active_readers
))
207 #else /* #ifndef HAS_INCOHERENT_CACHES */
210 * BUSY-LOOP. Force the reader thread to commit its
211 * urcu_active_readers update to memory if we wait for too long.
213 while (rcu_old_gp_ongoing(index
->urcu_active_readers
)) {
214 if (wait_loops
++ == KICK_READER_LOOPS
) {
215 force_mb_single_thread(index
);
221 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
225 void synchronize_rcu(void)
227 internal_urcu_lock();
228 force_mb_all_threads();
230 wait_for_quiescent_state();
231 force_mb_all_threads();
232 internal_urcu_unlock();
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */
239 void rcu_read_lock(void)
244 void rcu_read_unlock(void)
/* LGPL wrapper around the _rcu_dereference() inline/macro. */
void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}
/*
 * Publish v through *p. The write barrier orders initialization of the
 * pointed-to data before the pointer store becomes visible to readers.
 * NOTE(review): the barrier line was lost in extraction — reconstructed
 * from upstream; confirm wmb() vs smp_wmb().
 */
void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(p, v);
}
/*
 * Atomically exchange *p with v, returning the old value.
 * NOTE(review): body lost in extraction — reconstructed as a wrapper over
 * _rcu_xchg_pointer() (the primitive visibly used by
 * rcu_publish_content_sym below); confirm against upstream.
 */
void *rcu_xchg_pointer_sym(void **p, void *v)
{
	return _rcu_xchg_pointer(p, v);
}
/*
 * Exchange *p for v, then wait for a full grace period so the returned
 * old pointer can be freed safely by the caller.
 */
void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}
275 static void rcu_add_reader(pthread_t id
)
277 struct reader_registry
*oldarray
;
280 alloc_readers
= INIT_NUM_THREADS
;
283 malloc(sizeof(struct reader_registry
) * alloc_readers
);
285 if (alloc_readers
< num_readers
+ 1) {
287 registry
= malloc(sizeof(struct reader_registry
)
288 * (alloc_readers
<< 1));
289 memcpy(registry
, oldarray
,
290 sizeof(struct reader_registry
) * alloc_readers
);
294 registry
[num_readers
].tid
= id
;
295 /* reference to the TLS of _this_ reader thread. */
296 registry
[num_readers
].urcu_active_readers
= &urcu_active_readers
;
297 registry
[num_readers
].need_mb
= &need_mb
;
302 * Never shrink (implementation limitation).
303 * This is O(nb threads). Eventually use a hash table.
305 static void rcu_remove_reader(pthread_t id
)
307 struct reader_registry
*index
;
309 assert(registry
!= NULL
);
310 for (index
= registry
; index
< registry
+ num_readers
; index
++) {
311 if (pthread_equal(index
->tid
, id
)) {
312 memcpy(index
, ®istry
[num_readers
- 1],
313 sizeof(struct reader_registry
));
314 registry
[num_readers
- 1].tid
= 0;
315 registry
[num_readers
- 1].urcu_active_readers
= NULL
;
320 /* Hrm not found, forgot to register ? */
/* Register the calling thread as an RCU reader. */
void rcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_init();	/* In case gcc does not support constructor attribute */
	rcu_add_reader(pthread_self());
	internal_urcu_unlock();
}
/* Unregister the calling thread; must have been registered before. */
void rcu_unregister_thread(void)
{
	internal_urcu_lock();
	rcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
339 #ifndef DEBUG_FULL_MB
340 static void sigurcu_handler(int signo
, siginfo_t
*siginfo
, void *context
)
343 * Executing this smp_mb() is the only purpose of this signal handler.
344 * It punctually promotes barrier() into smp_mb() on every thread it is
353 * urcu_init constructor. Called when the library is linked, but also when
354 * reader threads are calling rcu_register_thread().
355 * Should only be called by a single thread at a given time. This is ensured by
356 * holing the internal_urcu_lock() from rcu_register_thread() or by running at
357 * library load time, which should not be executed by multiple threads nor
358 * concurrently with rcu_register_thread() anyway.
362 struct sigaction act
;
369 act
.sa_sigaction
= sigurcu_handler
;
370 ret
= sigaction(SIGURCU
, &act
, NULL
);
372 perror("Error in sigaction");
379 struct sigaction act
;
382 ret
= sigaction(SIGURCU
, NULL
, &act
);
384 perror("Error in sigaction");
387 assert(act
.sa_sigaction
== sigurcu_handler
);
390 #endif /* #ifndef DEBUG_FULL_MB */