/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT (the grace period parity).
 * Also starts with a low-order count of 1 (RCU_GP_COUNT), to accelerate the
 * reader fast path.
 * Written to only by the writer with the mutex taken. Read by both writer
 * and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;
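/*
 * For reference, a sketch of the counter layout and of the reader fast path
 * that consumes the two variables above. The real definitions live in urcu.h;
 * the code below is an illustration under that assumption and may differ in
 * detail from the actual header, so it is kept compiled out.
 */
#if 0	/* illustration only */
#define RCU_GP_COUNT		(1UL << 0)
/* Parity bit: placed at half of the architecture long size. */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		/* Outermost lock: snapshot current parity + nest count of 1 */
		urcu_active_readers = LOAD_SHARED(urcu_gp_ctr);
	else
		/* Nested lock: only bump the nesting count */
		urcu_active_readers = tmp + RCU_GP_COUNT;
	/* Compiler barrier, promoted to smp_mb() by force_mb_*() below. */
	barrier();
}

static inline void rcu_read_unlock(void)
{
	barrier();	/* likewise promoted to smp_mb() when needed */
	urcu_active_readers -= RCU_GP_COUNT;
}
#endif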
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	long *urcu_active_readers;
};
unsigned int yield_active;
unsigned int __thread rand_yield;
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
#ifndef DEBUG_FULL_MB
/* Count of threads that have executed the barrier signal handler. */
static int sig_done;
#endif
void internal_urcu_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}
void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
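/*
 * Worked example of the parity flip above, assuming the illustrative
 * constants sketched near the top of this file, on a 64-bit long
 * (RCU_GP_CTR_BIT == 1UL << 32):
 *
 *   urcu_gp_ctr == 0x000000001 (parity 0)  --xor-->  0x100000001 (parity 1)
 *   urcu_gp_ctr == 0x100000001 (parity 1)  --xor-->  0x000000001 (parity 0)
 *
 * Only the parity bit changes; the low-order count stays at RCU_GP_COUNT, so
 * readers snapshotting urcu_gp_ctr always start with a nesting count of 1.
 */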
#ifdef DEBUG_FULL_MB
/* Debug mode: read-side barriers are real smp_mb()s; no signaling needed. */
static void force_mb_single_thread(pthread_t tid)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else
static void force_mb_single_thread(pthread_t tid)
{
	assert(reader_data);
	sig_done = 0;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent caches. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	smp_mc();	/* write sig_done before sending the signal */
	pthread_kill(tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on this thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < 1)
		barrier();
	smp_mb();	/* read sig_done before ending the barrier */
}
static void force_mb_all_threads(void)
{
	struct reader_data *index;

	/*
	 * Ask each thread to execute a smp_mb(), so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent caches. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	smp_mc();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < num_readers)
		barrier();
	smp_mb();	/* read sig_done before ending the barrier */
}
#endif /* DEBUG_FULL_MB */
void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index->tid);
				wait_loops = 0;
			}
		}
	}
}
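/*
 * For reference, a sketch of the rcu_old_gp_ongoing() test used above. The
 * real implementation lives in urcu.h; this illustration assumes the counter
 * layout sketched near the top of the file and is kept compiled out. A reader
 * must be waited for only when it is inside a read-side critical section
 * (non-zero nesting count) AND its snapshot was taken before the last
 * qparity switch (its parity bit differs from urcu_gp_ctr's).
 */
#if 0	/* illustration only */
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
#endif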
void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always
	 * accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}
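/*
 * Typical writer-side usage of synchronize_rcu(), sketched for illustration
 * and kept compiled out. The update function, the shared pointer and
 * struct test_array are hypothetical, not part of this library;
 * rcu_assign_pointer() is assumed to come from urcu.h.
 */
#if 0	/* illustration only */
void update_shared(struct test_array **shared_ptr, struct test_array *new)
{
	struct test_array *old = *shared_ptr;

	rcu_assign_pointer(*shared_ptr, new);	/* publish the new version */
	synchronize_rcu();	/* wait for all pre-existing readers */
	free(old);		/* now safe: no reader can still hold old */
}
#endif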
void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}
/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm not found, forgot to register ? */
	assert(0);
}
void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}
void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
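/*
 * Typical reader-thread lifecycle, sketched for illustration and kept
 * compiled out. The thread function, shared_ptr and struct test_array are
 * hypothetical; rcu_read_lock(), rcu_read_unlock() and rcu_dereference()
 * are assumed to come from urcu.h.
 */
#if 0	/* illustration only */
void *thr_reader(void *arg)
{
	struct test_array *local;
	int i;

	urcu_register_thread();	/* expose this thread's TLS to the writer */
	for (i = 0; i < 1000; i++) {
		rcu_read_lock();
		local = rcu_dereference(shared_ptr);
		if (local)
			assert(local->a == 8);	/* read protected data */
		rcu_read_unlock();
	}
	urcu_unregister_thread();
	return NULL;
}
#endif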
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	atomic_inc(&sig_done);
}
void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* required for sa_sigaction to be used */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}
void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
}
#endif /* !DEBUG_FULL_MB */