/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a low-order RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;

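/*
 * How the two counters interact (an informal sketch; the authoritative
 * definitions of RCU_GP_COUNT, RCU_GP_CTR_BIT, RCU_GP_CTR_NEST_MASK and the
 * read-side fast path live in urcu.h). The low-order bits of
 * urcu_active_readers hold the read-side critical section nesting count;
 * RCU_GP_CTR_BIT holds the grace period parity snapshotted at the outermost
 * rcu_read_lock(). A reader still holds up the writer when its nesting count
 * is non-zero and its snapshotted parity differs from the current
 * urcu_gp_ctr parity, roughly:
 *
 *	v = LOAD_SHARED(urcu_active_readers);
 *	ongoing = (v & RCU_GP_CTR_NEST_MASK) &&
 *		  ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 */
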
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	long *urcu_active_readers;
	char *need_mb;
};

/* Debug yield instrumentation (used by urcu.h when DEBUG_YIELD is set). */
#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static char __thread need_mb;
static int num_readers, alloc_readers;

void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		/* Service a pending membarrier request while waiting. */
		if (need_mb) {
			smp_mb();
			need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

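/*
 * Illustration of the XOR in switch_next_urcu_qparity() above (values are
 * hypothetical; urcu.h defines the real bit layout): with
 * RCU_GP_CTR_BIT == 0x10000 and urcu_gp_ctr == 0x00001, the XOR yields
 * 0x10001 (parity 0 -> 1); a second call yields 0x00001 again
 * (parity 1 -> 0). Only the parity bit flips; the low-order count of 1 that
 * accelerates the reader fast path is preserved either way.
 */
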
#ifdef DEBUG_FULL_MB
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	smp_mb();
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef DEBUG_FULL_MB */
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	assert(registry);
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	*index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signals */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (*index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	struct reader_registry *index;
	/*
	 * Ask each reader thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!registry)
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	for (index = registry; index < registry + num_readers; index++) {
		*index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	for (index = registry; index < registry + num_readers; index++) {
		while (*index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef DEBUG_FULL_MB */

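/*
 * Design note: pushing the memory barriers onto the writer via signals is
 * what allows rcu_read_lock()/rcu_read_unlock() to get away with mere
 * compiler barriers on the read side. The trade-off is a much more
 * expensive synchronize_rcu(), which is the intended asymmetry: readers
 * are expected to vastly outnumber updates.
 */
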
void wait_for_quiescent_state(void)
{
	struct reader_registry *index;

	if (!registry)
		return;
	/*
	 * Wait for each thread urcu_active_readers count to become 0.
	 */
	for (index = registry; index < registry + num_readers; index++) {
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_old_gp_ongoing(index->urcu_active_readers))
			cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always
	 * accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}

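/*
 * Typical update-side usage (a sketch, not part of this library; it assumes
 * the rcu_assign_pointer() publication primitive from urcu.h and a
 * hypothetical shared pointer "struct foo *shared_ptr"):
 *
 *	struct foo *old, *new;
 *
 *	new = malloc(sizeof(*new));
 *	new->a = 8;
 *	old = shared_ptr;
 *	rcu_assign_pointer(shared_ptr, new);	// publish the new version
 *	synchronize_rcu();	// wait out all pre-existing readers
 *	free(old);		// now safe: no reader can still hold old
 */
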
void urcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
			sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	registry[num_readers].urcu_active_readers = &urcu_active_readers;
	registry[num_readers].need_mb = &need_mb;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Not found: did the thread forget to register? */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}

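/*
 * Every reader thread must register before its first read-side critical
 * section and unregister before it exits. A sketch of a reader thread
 * (thr_reader, NR_READS, shared_ptr and struct foo are hypothetical;
 * rcu_read_lock()/rcu_read_unlock() and rcu_dereference() come from urcu.h):
 *
 *	void *thr_reader(void *arg)
 *	{
 *		struct foo *p;
 *		int i;
 *
 *		urcu_register_thread();
 *		for (i = 0; i < NR_READS; i++) {
 *			rcu_read_lock();
 *			p = rcu_dereference(shared_ptr);
 *			if (p)
 *				assert(p->a == 8);
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */
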
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	need_mb = 0;	/* acknowledge the barrier request to the writer */
	smp_mb();
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* required when using sa_sigaction */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(registry);
}
#endif /* #ifndef DEBUG_FULL_MB */