/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"        /* provides mb(), barrier(), atomic_inc() and SIGURCU */

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Global quiescent period parity */
int urcu_qparity;

int __thread urcu_active_readers[2];

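/*
 * For reference, a sketch of the read side (the real rcu_read_lock() /
 * rcu_read_unlock() are expected to live in urcu.h; this is an illustrative
 * approximation, not the exact header code).  A reader samples the global
 * parity, increments its per-thread counter for that parity, and decrements
 * the same counter on unlock, so the parity must be remembered across the
 * critical section, e.g. on the caller's stack:
 *
 *        static inline void rcu_read_lock(int *parity)
 *        {
 *                *parity = urcu_qparity;
 *                urcu_active_readers[*parity]++;
 *                barrier();        // critical section stays after the increment
 *        }
 *
 *        static inline void rcu_read_unlock(int *parity)
 *        {
 *                barrier();        // critical section stays before the decrement
 *                urcu_active_readers[*parity]--;
 *        }
 */
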
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
        pthread_t tid;
        int *urcu_active_readers;        /* points to the reader's TLS counters */
};

/* Debug-yield instrumentation state */
unsigned int yield_active;
unsigned int __thread rand_yield;

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;        /* incremented by sigurcu_handler(); see below */

void internal_urcu_lock(void)
{
        int ret;

        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
}

void internal_urcu_unlock(void)
{
        int ret;

        ret = pthread_mutex_unlock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex unlock");
                exit(-1);
        }
}

/*
 * Called with urcu_mutex held.
 */
static int switch_next_urcu_qparity(void)
{
        int old_parity = urcu_qparity;

        urcu_qparity = 1 - old_parity;
        return old_parity;
}

static void force_mb_all_threads(void)
{
        struct reader_data *index;

        /*
         * Ask each thread to execute a mb(), so we can consider the
         * compiler barriers around rcu read lock as real memory barriers.
         */
        if (!reader_data)
                return;
        sig_done = 0;
        mb();        /* write sig_done before sending the signals */
        for (index = reader_data; index < reader_data + num_readers; index++) {
                pthread_kill(index->tid, SIGURCU);
        }
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
         */
        while (sig_done < num_readers)
                barrier();        /* re-read sig_done on every iteration */
        mb();        /* read sig_done before ending the barrier */
}

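/*
 * Design note: signalling readers instead of having them issue mb() keeps the
 * read-side fast path down to plain counter updates and compiler barriers;
 * the writer absorbs the full memory-barrier cost by running mb() in
 * sigurcu_handler() (below) on every registered reader thread.
 */
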
void wait_for_quiescent_state(int parity)
{
        struct reader_data *index;

        if (!reader_data)
                return;
        /* Wait for each thread's urcu_active_readers count to become 0. */
        for (index = reader_data; index < reader_data + num_readers; index++) {
                while (index->urcu_active_readers[parity] != 0)
                        barrier();        /* busy-wait, re-reading the counter */
        }
        /*
         * Locally : read *index->urcu_active_readers before freeing old
         * pointer.
         * Remote (reader threads) : Order urcu_qparity update and other
         * thread's quiescent state counter read.
         */
        force_mb_all_threads();
}

static void switch_qparity(void)
{
        int prev_parity;

        /* All threads should read qparity before accessing data structure. */
        /* Write ptr before changing the qparity. */
        force_mb_all_threads();
        prev_parity = switch_next_urcu_qparity();

        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state(prev_parity);
}

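/*
 * Why synchronize_rcu() switches the parity twice: a reader can sample
 * urcu_qparity and be delayed before incrementing its counter, so an ongoing
 * critical section may be accounted under either parity, not necessarily the
 * current one.  Switching and waiting twice ensures that readers registered
 * under both parities have drained before the grace period is declared over.
 */
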
void synchronize_rcu(void)
{
        internal_urcu_lock();
        switch_qparity();
        switch_qparity();
        internal_urcu_unlock();
}

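/*
 * Illustrative use (not part of the library; shared_ptr, new_node and
 * struct node are hypothetical): a writer publishes a new version, waits
 * for a grace period, then frees the old one.
 *
 *        struct node *old = shared_ptr;
 *
 *        shared_ptr = new_node;        // publish the new version
 *        synchronize_rcu();            // wait out all pre-existing readers
 *        free(old);                    // no reader can still reference old
 */
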
/*
 * Return the old pointer; it is OK to free, since no more references to it
 * exist.  Takes urcu_mutex internally.
 */
void *urcu_publish_content(void **ptr, void *new)
{
        void *oldptr;

        internal_urcu_lock();
        /*
         * We can publish the new pointer before we change the current qparity.
         * Readers seeing the new pointer while being in the previous qparity
         * window will make us wait until the end of the quiescent state before
         * we release the unrelated memory area. However, given we hold the
         * urcu_mutex, we are making sure that no further garbage collection can
         * occur until we release the mutex, therefore we guarantee that this
         * given reader will have completed its execution using the new pointer
         * when the next quiescent state window will be over.
         */
        oldptr = *ptr;
        *ptr = new;

        switch_qparity();
        switch_qparity();
        internal_urcu_unlock();

        return oldptr;
}

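/*
 * Illustrative use (hypothetical names again): urcu_publish_content()
 * combines the pointer exchange with the grace period, so the returned
 * pointer may be freed immediately.
 *
 *        struct node *new_node = malloc(sizeof(*new_node));
 *
 *        // ... initialize *new_node ...
 *        old = urcu_publish_content((void **)&shared_ptr, new_node);
 *        free(old);
 */
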
void urcu_add_reader(pthread_t id)
{
        struct reader_data *oldarray;

        if (!reader_data) {
                alloc_readers = INIT_NUM_THREADS;
                num_readers = 0;
                reader_data =
                        malloc(sizeof(struct reader_data) * alloc_readers);
        }
        if (alloc_readers < num_readers + 1) {
                oldarray = reader_data;
                reader_data = malloc(sizeof(struct reader_data)
                                * (alloc_readers << 1));
                memcpy(reader_data, oldarray,
                        sizeof(struct reader_data) * alloc_readers);
                alloc_readers <<= 1;
                free(oldarray);
        }
        reader_data[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
        reader_data[num_readers].urcu_active_readers = urcu_active_readers;
        num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
        struct reader_data *index;

        assert(reader_data != NULL);
        for (index = reader_data; index < reader_data + num_readers; index++) {
                if (pthread_equal(index->tid, id)) {
                        memcpy(index, &reader_data[num_readers - 1],
                                sizeof(struct reader_data));
                        reader_data[num_readers - 1].tid = 0;
                        reader_data[num_readers - 1].urcu_active_readers = NULL;
                        num_readers--;
                        return;
                }
        }
        /* Hrm, not found: did the caller forget to register? */
        assert(0);
}

void urcu_register_thread(void)
{
        internal_urcu_lock();
        urcu_add_reader(pthread_self());
        internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
        internal_urcu_lock();
        urcu_remove_reader(pthread_self());
        internal_urcu_unlock();
}

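/*
 * Illustrative reader thread (hypothetical; rcu_read_lock()/rcu_read_unlock()
 * as sketched near the top of this file): a reader must register before its
 * first critical section and unregister before exiting, so the writer knows
 * which per-thread counters to poll and which threads to signal.
 *
 *        void *reader_thread(void *arg)
 *        {
 *                int parity;
 *
 *                urcu_register_thread();
 *                rcu_read_lock(&parity);
 *                // ... read the shared data structure ...
 *                rcu_read_unlock(&parity);
 *                urcu_unregister_thread();
 *                return NULL;
 *        }
 */
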
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
        mb();        /* executes the mb() requested by force_mb_all_threads() */
        atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
        struct sigaction act;
        int ret;

        act.sa_sigaction = sigurcu_handler;
        act.sa_flags = SA_SIGINFO;        /* required to use sa_sigaction */
        sigemptyset(&act.sa_mask);
        ret = sigaction(SIGURCU, &act, NULL);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
}

void __attribute__((destructor)) urcu_exit(void)
{
        struct sigaction act;
        int ret;

        ret = sigaction(SIGURCU, NULL, &act);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
}