/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>	/* poll(), used by the busy-wait loops below */

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT (grace period parity).
 * Also always carries a low-order count of 1 (RCU_GP_COUNT), to accelerate
 * the reader fast path.
 * Written to only by the writer with the mutex taken. Read by both the
 * writer and the readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

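/*
 * Layout sketch (an editorial addition, assuming the definitions in
 * urcu.h): urcu_gp_ctr combines a parity bit with a nesting count:
 *
 *	[ RCU_GP_CTR_BIT (parity) | low-order count, kept at RCU_GP_COUNT ]
 *
 * An outermost read-side critical section snapshots urcu_gp_ctr into the
 * thread's urcu_active_readers; the writer flips the parity bit, so
 * rcu_old_gp_ongoing() can distinguish readers that started before the
 * flip (non-zero count, parity differing from urcu_gp_ctr) from readers
 * that started after it.
 */
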
/*
 * Written to only by each individual reader. Read by both the reader and
 * the writers.
 */
long __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	long *urcu_active_readers;
	char *need_mb;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static char __thread need_mb;
static int num_readers, alloc_readers;

void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		/*
		 * In case the SIGURCU signal was lost, service any pending
		 * memory-barrier request ourselves while spinning for the
		 * mutex; otherwise a writer holding that mutex inside
		 * force_mb_all_threads() could wait on us forever.
		 */
		if (need_mb) {
			smp_mb();
			need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

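/*
 * Illustration (an editorial addition, with assumed constants): XOR with
 * RCU_GP_CTR_BIT toggles only the parity bit and leaves the low-order
 * count intact. For example, if RCU_GP_CTR_BIT were 0x10000 and
 * RCU_GP_COUNT were 1:
 *
 *	0x00001 ^ 0x10000 == 0x10001	(parity 0 -> 1)
 *	0x10001 ^ 0x10000 == 0x00001	(parity 1 -> 0)
 */
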
#ifdef DEBUG_FULL_MB
static void force_mb_single_thread(struct reader_registry *index)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else

static void force_mb_single_thread(struct reader_registry *index)
{
	assert(registry);
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's
	 * play it safe and not assume anything: we use smp_mc() to make
	 * sure the cache flush is enforced.
	 */
	*index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for the sighandler (and thus mb()) to execute on that
	 * thread. BUSY-LOOP.
	 */
	while (*index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}

static void force_mb_all_threads(void)
{
	struct reader_registry *index;
	/*
	 * Ask each thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!registry)
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's
	 * play it safe and not assume anything: we use smp_mc() to make
	 * sure the cache flush is enforced.
	 */
	for (index = registry; index < registry + num_readers; index++) {
		*index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for the sighandler (and thus mb()) to execute on every
	 * thread.
	 *
	 * Note that the pthread_kill() below will never be executed on
	 * systems that correctly deliver signals in a timely manner.
	 * However, it is not uncommon for kernels to have bugs that can
	 * result in lost or unduly delayed signals.
	 *
	 * If you see the pthread_kill() below executing much at all, we
	 * suggest testing the underlying kernel and filing the relevant
	 * bug report. For Linux kernels, we recommend getting the Linux
	 * Test Project (LTP).
	 */
	for (index = registry; index < registry + num_readers; index++) {
		while (*index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif

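/*
 * Summary of the handshake above (an editorial addition): the writer sets
 * each reader's need_mb flag, orders that store with smp_mc(), and sends
 * SIGURCU. The sigurcu_handler() below executes smp_mb() and clears
 * need_mb; the writer busy-waits until every flag is clear, then issues a
 * final smp_mb(). The net effect is a full memory barrier on every
 * registered reader thread, which is what lets the read-side fast path get
 * away with mere compiler barriers.
 */
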
void wait_for_quiescent_state(void)
{
	struct reader_registry *index;

	if (!registry)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = registry; index < registry + num_readers; index++) {
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
	}
}

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/*
	 * All threads should read qparity before accessing the data
	 * structure that the new ptr points to. Must be done within
	 * internal_urcu_lock because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait for readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are
	 * always accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait for readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it
	 * iterates on reader threads.
	 */
	force_mb_all_threads();

	internal_urcu_unlock();
}

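/*
 * Example (a minimal editorial sketch, not part of the original file): the
 * writer pattern that synchronize_rcu() supports. shared_p and struct
 * mydata are hypothetical names; the first force_mb_all_threads() above is
 * what orders the publication store before the parity switch.
 *
 *	struct mydata *old_p, *new_p;
 *
 *	new_p = malloc(sizeof(*new_p));
 *	new_p->value = 42;
 *	old_p = shared_p;
 *	shared_p = new_p;		(publish the new version)
 *	synchronize_rcu();		(wait for pre-existing readers)
 *	free(old_p);			(no reader can still hold old_p)
 */
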
void urcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
			sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* Reference to the TLS of _this_ reader thread. */
	registry[num_readers].urcu_active_readers = &urcu_active_readers;
	registry[num_readers].need_mb = &need_mb;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(number of threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Not found: the thread probably forgot to register. */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}

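/*
 * Example (a minimal editorial sketch, not part of the original file): the
 * life cycle of a reader thread, assuming the read-side helpers
 * rcu_read_lock(), rcu_read_unlock() and rcu_dereference() declared in
 * urcu.h; shared_p, struct mydata and use() are hypothetical names.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		struct mydata *p;
 *
 *		urcu_register_thread();
 *		rcu_read_lock();
 *		p = rcu_dereference(shared_p);
 *		if (p)
 *			use(p->value);
 *		rcu_read_unlock();
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */
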
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal
	 * handler. It promotes, at that instant, the read-side barrier()
	 * into an smp_mb() on whichever thread it executes on.
	 */
	smp_mb();
	need_mb = 0;
	smp_mb();
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* required when using sa_sigaction */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(registry);
}
#endif