/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Global grace period counter */
int urcu_gp_ctr;

int __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	int *urcu_active_readers;
};

unsigned int yield_active;
unsigned int __thread rand_yield;

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;

void internal_urcu_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Must be called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}

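/*
 * For context, a minimal sketch of the read-side that this parity flip
 * pairs with.  The real rcu_read_lock()/rcu_read_unlock() live in
 * urcu.h; RCU_GP_COUNT and RCU_GP_CTR_NEST_MASK below are assumed to be
 * the nesting-count increment and mask defined there.  On the outermost
 * nesting level the reader snapshots urcu_gp_ctr (parity bit included)
 * into its per-thread urcu_active_readers; nested calls only bump the
 * nesting count.
 */
#if 0	/* illustrative sketch, not part of this file */
static inline void rcu_read_lock(void)
{
	int tmp = urcu_active_readers;

	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	barrier();	/* promoted to a real mb() by force_mb_all_threads() */
}

static inline void rcu_read_unlock(void)
{
	barrier();
	urcu_active_readers -= RCU_GP_COUNT;
}
#endif
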
static void force_mb_all_threads(void)
{
	struct reader_data *index;

	/*
	 * Ask each thread to execute a mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	mb();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (sig_done < num_readers)
		barrier();
	mb();	/* read sig_done before ending the barrier */
}

void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		/* BUSY-LOOP. */
		while (rcu_old_gp_ongoing(index->urcu_active_readers))
			barrier();
	}
	/*
	 * Locally: read *index->urcu_active_readers before freeing the old
	 * pointer.
	 * Remote (reader threads): order the urcu_qparity update and the other
	 * thread's quiescent state counter read.
	 */
	force_mb_all_threads();
}

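/*
 * For reference, a sketch of the rcu_old_gp_ongoing() predicate used
 * above (the real one lives in urcu.h; the constants are assumptions
 * consistent with the read-side sketch near switch_next_urcu_qparity()).
 * A reader still blocks the old grace period iff its nesting count is
 * non-zero and its snapshot carries the previous parity bit.
 */
#if 0	/* illustrative sketch, not part of this file */
static inline int rcu_old_gp_ongoing(int *value)
{
	int v;

	if (value == NULL)
		return 0;
	v = *value;	/* racy read; the busy-loop re-reads until it clears */
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
#endif
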
static void switch_qparity(void)
{
	/* All threads should read qparity before accessing data structure. */
	/* Write ptr before changing the qparity. */
	force_mb_all_threads();

	switch_next_urcu_qparity();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();
}

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* Two flips: a pre-existing reader may hold either parity. */
	switch_qparity();
	switch_qparity();

	internal_urcu_unlock();
}

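/*
 * Hypothetical writer-side usage of synchronize_rcu(): publish a new
 * version of a shared structure, wait for pre-existing readers, then
 * reclaim the old copy.  `struct mystruct', `rcu_pointer' and update()
 * are illustrative names, not part of the library.
 */
#if 0	/* usage sketch */
struct mystruct *rcu_pointer;

void update(struct mystruct *new)
{
	struct mystruct *old = rcu_pointer;

	rcu_pointer = new;	/* publish the new version */
	synchronize_rcu();	/* wait for all pre-existing readers */
	free(old);		/* no reader can still reference `old' */
}
#endif
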
/*
 * Return old pointer, OK to free, no more references exist.
 * Called under rcu_write_lock.
 */
void *urcu_publish_content(void **ptr, void *new)
{
	void *oldptr;

	internal_urcu_lock();
	/*
	 * We can publish the new pointer before we change the current qparity.
	 * Readers seeing the new pointer while being in the previous qparity
	 * window will make us wait until the end of the quiescent state before
	 * we release the unrelated memory area. However, given that we hold the
	 * urcu_mutex, no further garbage collection can occur until we release
	 * the mutex; therefore we guarantee that any such reader will have
	 * completed its execution using the new pointer when the next quiescent
	 * state window is over.
	 */
	oldptr = *ptr;
	*ptr = new;

	switch_qparity();
	switch_qparity();
	internal_urcu_unlock();

	return oldptr;
}

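/*
 * Hypothetical caller sketch: urcu_publish_content() bundles the
 * publish and grace-period wait of the update() sketch above into a
 * single call.  `struct mystruct' and `rcu_pointer' are illustrative.
 */
#if 0	/* usage sketch */
void update_with_helper(struct mystruct *new)
{
	struct mystruct *old;

	old = urcu_publish_content((void **)&rcu_pointer, new);
	free(old);	/* safe: the grace period has elapsed */
}
#endif
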
void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		/* Grow the array by doubling. */
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* Reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			/* Overwrite the entry with the last one, then drop it. */
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Not found: did this thread forget to register? */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}

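/*
 * Hypothetical reader-thread lifecycle: a thread must register before
 * its first read-side critical section and unregister before exiting,
 * so the writer can signal it and poll its urcu_active_readers count.
 * `struct mystruct', `rcu_pointer' and reader_thread() are illustrative
 * names; rcu_read_lock()/rcu_read_unlock() come from urcu.h.
 */
#if 0	/* usage sketch */
void *reader_thread(void *arg)
{
	struct mystruct *p;

	(void)arg;	/* unused */
	urcu_register_thread();

	rcu_read_lock();
	p = rcu_pointer;	/* dereference the shared pointer */
	/* ... read-only access to *p ... */
	rcu_read_unlock();

	urcu_unregister_thread();
	return NULL;
}
#endif
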
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	mb();	/* the memory barrier requested by force_mb_all_threads() */
	atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* three-argument handler */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
}