/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;
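
/*
 * Layout sketch (an editorial assumption based on the RCU_GP_* definitions
 * expected from urcu.h, which are not spelled out in this file): the
 * low-order bits of the counter hold a reader nesting count and
 * RCU_GP_CTR_BIT marks the grace period parity, e.g.:
 *
 *	[ unused | parity (RCU_GP_CTR_BIT) | nesting count (RCU_GP_COUNT units) ]
 *
 * Keeping a count of 1 in the global counter lets rcu_read_lock() publish
 * "current parity + one nesting level" with a single copy of urcu_gp_ctr.
 */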

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	long *urcu_active_readers;
	char *need_mb;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static char __thread need_mb;
static int num_readers, alloc_readers;

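/*
 * Note on the DISTRUST_SIGNALS_EXTREME build below (editorial summary of
 * the code that follows): a registered reader spinning for urcu_mutex can
 * itself be the target of a force_mb_all_threads() issued by the current
 * lock holder. If signal delivery is unreliable, the writer could then wait
 * forever on that reader's need_mb flag. The trylock loop therefore
 * services need_mb by hand while waiting, instead of blocking inside
 * pthread_mutex_lock().
 */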
void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (need_mb) {
			smp_mb();
			need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

#ifdef DEBUG_FULL_MB
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	smp_mb();
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef DEBUG_FULL_MB */
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	assert(registry);
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	*index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for sighandler (and thus mb()) to execute on this thread.
	 * BUSY-LOOP.
	 */
	while (*index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	struct reader_registry *index;
	/*
	 * Ask each thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!registry)
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	for (index = registry; index < registry + num_readers; index++) {
		*index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	for (index = registry; index < registry + num_readers; index++) {
		while (*index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef DEBUG_FULL_MB */

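/*
 * For reference, rcu_old_gp_ongoing() comes from urcu.h. A sketch of the
 * test it is expected to perform (assumed from the counter layout described
 * above, not defined in this file): a reader is still "old" when its
 * snapshot shows a non-zero nesting count taken under the previous parity:
 *
 *	v = LOAD_SHARED(*value);
 *	return (v & RCU_GP_CTR_NEST_MASK) &&
 *		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 */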
void wait_for_quiescent_state(void)
{
	struct reader_registry *index;

	if (!registry)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = registry; index < registry + num_readers; index++) {
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_old_gp_ongoing(index->urcu_active_readers))
			cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
213 | ||
9598a481 | 214 | void synchronize_rcu(void) |
2bc59bd7 | 215 | { |
135530fd MD |
216 | internal_urcu_lock(); |
217 | ||
9598a481 | 218 | /* All threads should read qparity before accessing data structure |
135530fd MD |
219 | * where new ptr points to. Must be done within internal_urcu_lock |
220 | * because it iterates on reader threads.*/ | |
9598a481 | 221 | /* Write new ptr before changing the qparity */ |
2bc59bd7 | 222 | force_mb_all_threads(); |
9598a481 | 223 | |
9598a481 | 224 | switch_next_urcu_qparity(); /* 0 -> 1 */ |
2bc59bd7 PM |
225 | |
226 | /* | |
9598a481 MD |
227 | * Must commit qparity update to memory before waiting for parity |
228 | * 0 quiescent state. Failure to do so could result in the writer | |
229 | * waiting forever while new readers are always accessing data (no | |
230 | * progress). | |
b0d5e790 | 231 | * Ensured by STORE_SHARED and LOAD_SHARED. |
2bc59bd7 | 232 | */ |
2bc59bd7 | 233 | |
9598a481 MD |
234 | /* |
235 | * Wait for previous parity to be empty of readers. | |
236 | */ | |
237 | wait_for_quiescent_state(); /* Wait readers in parity 0 */ | |
9598a481 MD |
238 | |
239 | /* | |
240 | * Must finish waiting for quiescent state for parity 0 before | |
241 | * committing qparity update to memory. Failure to do so could result in | |
242 | * the writer waiting forever while new readers are always accessing | |
243 | * data (no progress). | |
b0d5e790 | 244 | * Ensured by STORE_SHARED and LOAD_SHARED. |
9598a481 | 245 | */ |
9598a481 MD |
246 | |
247 | switch_next_urcu_qparity(); /* 1 -> 0 */ | |
9598a481 MD |
248 | |
249 | /* | |
250 | * Must commit qparity update to memory before waiting for parity | |
251 | * 1 quiescent state. Failure to do so could result in the writer | |
252 | * waiting forever while new readers are always accessing data (no | |
253 | * progress). | |
b0d5e790 | 254 | * Ensured by STORE_SHARED and LOAD_SHARED. |
9598a481 | 255 | */ |
9598a481 MD |
256 | |
257 | /* | |
258 | * Wait for previous parity to be empty of readers. | |
259 | */ | |
260 | wait_for_quiescent_state(); /* Wait readers in parity 1 */ | |
9598a481 | 261 | |
9598a481 | 262 | /* Finish waiting for reader threads before letting the old ptr being |
135530fd MD |
263 | * freed. Must be done within internal_urcu_lock because it iterates on |
264 | * reader threads. */ | |
9598a481 | 265 | force_mb_all_threads(); |
135530fd MD |
266 | |
267 | internal_urcu_unlock(); | |
2bc59bd7 PM |
268 | } |
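
/*
 * Writer-side usage sketch (illustrative only; rcu_xchg_pointer() and the
 * shared_ptr variable are assumptions for the example, not part of this
 * file):
 *
 *	struct foo *old, *new;
 *
 *	new = malloc(sizeof(*new));
 *	new->a = 8;
 *	old = rcu_xchg_pointer(&shared_ptr, new);
 *	synchronize_rcu();	// wait for all pre-existing readers
 *	free(old);		// now safe: no reader can still see old
 */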

void urcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
			sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	registry[num_readers].urcu_active_readers = &urcu_active_readers;
	registry[num_readers].need_mb = &need_mb;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm, not found. Forgot to register? */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
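
/*
 * Reader-side usage sketch (illustrative only; rcu_read_lock(),
 * rcu_read_unlock() and rcu_dereference() are expected to come from urcu.h,
 * and shared_ptr is an assumption for the example):
 *
 *	urcu_register_thread();		// once per reader thread
 *	...
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *	...
 *	urcu_unregister_thread();	// before the thread exits
 */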

#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	need_mb = 0;
	smp_mb();
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* required for sa_sigaction to be used */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(registry);
}
#endif /* #ifndef DEBUG_FULL_MB */