/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by the writer with the mutex taken. Read by both writer and
 * readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;
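
/*
 * Layout note (a sketch of the assumed encoding; RCU_GP_COUNT, RCU_GP_CTR_BIT
 * and RCU_GP_CTR_NEST_MASK are expected to be defined in urcu.h): the low
 * bits of these counters hold the read-side critical section nesting count in
 * units of RCU_GP_COUNT, and RCU_GP_CTR_BIT holds the grace period parity.
 * Since urcu_gp_ctr always carries a count of 1 (switch_next_urcu_qparity()
 * below only XORs RCU_GP_CTR_BIT), an outermost reader can presumably
 * snapshot it verbatim:
 *
 *        urcu_active_readers = LOAD_SHARED(urcu_gp_ctr);  // nesting = 1, current parity
 */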

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
        pthread_t tid;
        long *urcu_active_readers;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
#ifndef DEBUG_FULL_MB
static int sig_done;
#endif

void internal_urcu_lock(void)
{
#if 0
        int ret;
        /* Mutex sleeping does not play well with busy-waiting loop. */
        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
#endif
        while (pthread_mutex_trylock(&urcu_mutex) != 0)
                cpu_relax();
}

void internal_urcu_unlock(void)
{
        int ret;

        ret = pthread_mutex_unlock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex unlock");
                exit(-1);
        }
}

/*
 * called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
        STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

#ifdef DEBUG_FULL_MB
static void force_mb_single_thread(pthread_t tid)
{
        smp_mb();
}

static void force_mb_all_threads(void)
{
        smp_mb();
}
#else

static void force_mb_single_thread(pthread_t tid)
{
        assert(reader_data);
        sig_done = 0;
        /*
         * pthread_kill has a smp_mb(). But beware, we assume it performs
         * a cache flush on architectures with non-coherent caches. Let's play
         * it safe and not assume anything: we use smp_mc() to make sure the
         * cache flush is enforced.
         * smp_mb();        write sig_done before sending the signal
         */
        smp_mc();        /* write sig_done before sending the signal */
        pthread_kill(tid, SIGURCU);
        /*
         * Wait for the sighandler (and thus mb()) to execute on that thread.
         * BUSY-LOOP.
         */
        while (LOAD_SHARED(sig_done) < 1)
                cpu_relax();
        smp_mb();        /* read sig_done before ending the barrier */
}

static void force_mb_all_threads(void)
{
        struct reader_data *index;
        /*
         * Ask each thread to execute a smp_mb() so we can consider the
         * compiler barriers around rcu read lock as real memory barriers.
         */
        if (!reader_data)
                return;
        sig_done = 0;
        /*
         * pthread_kill has a smp_mb(). But beware, we assume it performs
         * a cache flush on architectures with non-coherent caches. Let's play
         * it safe and not assume anything: we use smp_mc() to make sure the
         * cache flush is enforced.
         * smp_mb();        write sig_done before sending the signals
         */
        smp_mc();        /* write sig_done before sending the signals */
        for (index = reader_data; index < reader_data + num_readers; index++)
                pthread_kill(index->tid, SIGURCU);
        /*
         * Wait for the sighandler (and thus mb()) to execute on every thread.
         * BUSY-LOOP.
         */
        while (LOAD_SHARED(sig_done) < num_readers)
                cpu_relax();
        smp_mb();        /* read sig_done before ending the barrier */
}
#endif
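
/*
 * Pairing note: the read-side primitives in urcu.h presumably bracket the
 * urcu_active_readers update with barrier() only (a compiler barrier). A
 * minimal sketch of what that fast path could look like (the exact names and
 * code are assumptions, not verified against this header version):
 *
 *        static inline void rcu_read_lock(void)
 *        {
 *                long tmp = urcu_active_readers;
 *
 *                if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *                        urcu_active_readers = LOAD_SHARED(urcu_gp_ctr);
 *                else
 *                        urcu_active_readers = tmp + RCU_GP_COUNT;
 *                barrier();        // promoted to smp_mb() by sigurcu_handler()
 *        }
 *
 * force_mb_all_threads() and force_mb_single_thread() upgrade those compiler
 * barriers into real memory barriers on demand, by making every registered
 * reader run smp_mb() in the SIGURCU handler.
 */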

void wait_for_quiescent_state(void)
{
        struct reader_data *index;

        if (!reader_data)
                return;
        /*
         * Wait for each thread's urcu_active_readers count to become 0 (or to
         * catch up with the current grace period parity).
         */
        for (index = reader_data; index < reader_data + num_readers; index++) {
                int wait_loops = 0;
                /*
                 * BUSY-LOOP. Force the reader thread to commit its
                 * urcu_active_readers update to memory if we wait for too
                 * long.
                 */
                while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
                        if (wait_loops++ == KICK_READER_LOOPS) {
                                force_mb_single_thread(index->tid);
                                wait_loops = 0;
                        } else {
                                cpu_relax();
                        }
                }
        }
}
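
/*
 * rcu_old_gp_ongoing() is provided by urcu.h. A plausible sketch of the check
 * it performs, using the assumed RCU_GP_CTR_NEST_MASK constant (an
 * illustration of the parity scheme, not necessarily the exact header code):
 *
 *        static inline int rcu_old_gp_ongoing(long *value)
 *        {
 *                long v;
 *
 *                if (value == NULL)
 *                        return 0;
 *                v = LOAD_SHARED(*value);
 *                return (v & RCU_GP_CTR_NEST_MASK) &&
 *                        ((v ^ LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
 *        }
 *
 * i.e. the reader only blocks the grace period if it is nested in a read-side
 * critical section AND that critical section snapshotted the other parity.
 */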

void synchronize_rcu(void)
{
        internal_urcu_lock();

        /* All threads should read the qparity before accessing the data
         * structure pointed to by the new ptr. Must be done within
         * internal_urcu_lock because it iterates on reader threads. */
        /* Write new ptr before changing the qparity */
        force_mb_all_threads();

        switch_next_urcu_qparity();        /* 0 -> 1 */

        /*
         * Must commit the qparity update to memory before waiting for the
         * parity 0 quiescent state. Failure to do so could result in the
         * writer waiting forever while new readers are always accessing data
         * (no progress).
         * Ensured by STORE_SHARED and LOAD_SHARED.
         */

        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state();        /* Wait readers in parity 0 */

        /*
         * Must finish waiting for the parity 0 quiescent state before
         * committing the next qparity update to memory. Failure to do so
         * could result in the writer waiting forever while new readers are
         * always accessing data (no progress).
         * Ensured by STORE_SHARED and LOAD_SHARED.
         */

        switch_next_urcu_qparity();        /* 1 -> 0 */

        /*
         * Must commit the qparity update to memory before waiting for the
         * parity 1 quiescent state. Failure to do so could result in the
         * writer waiting forever while new readers are always accessing data
         * (no progress).
         * Ensured by STORE_SHARED and LOAD_SHARED.
         */

        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state();        /* Wait readers in parity 1 */

        /* Finish waiting for reader threads before letting the old ptr be
         * freed. Must be done within internal_urcu_lock because it iterates
         * on reader threads. */
        force_mb_all_threads();

        internal_urcu_unlock();
}
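
/*
 * Typical writer-side usage (a minimal sketch; struct foo, foo_ptr and
 * rcu_assign_pointer(), assumed to come from urcu.h, are illustrative and not
 * part of this file):
 *
 *        struct foo *new_foo = malloc(sizeof(*new_foo));
 *        struct foo *old_foo;
 *
 *        new_foo->value = 42;
 *        old_foo = foo_ptr;
 *        rcu_assign_pointer(foo_ptr, new_foo);        // publish the new version
 *        synchronize_rcu();                // wait for pre-existing readers
 *        free(old_foo);                        // now safe to reclaim
 */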

void urcu_add_reader(pthread_t id)
{
        struct reader_data *oldarray;

        if (!reader_data) {
                alloc_readers = INIT_NUM_THREADS;
                num_readers = 0;
                reader_data =
                        malloc(sizeof(struct reader_data) * alloc_readers);
        }
        if (alloc_readers < num_readers + 1) {
                oldarray = reader_data;
                reader_data = malloc(sizeof(struct reader_data)
                                * (alloc_readers << 1));
                memcpy(reader_data, oldarray,
                        sizeof(struct reader_data) * alloc_readers);
                alloc_readers <<= 1;
                free(oldarray);
        }
        reader_data[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
        reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
        num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
        struct reader_data *index;

        assert(reader_data != NULL);
        for (index = reader_data; index < reader_data + num_readers; index++) {
                if (pthread_equal(index->tid, id)) {
                        memcpy(index, &reader_data[num_readers - 1],
                                sizeof(struct reader_data));
                        reader_data[num_readers - 1].tid = 0;
                        reader_data[num_readers - 1].urcu_active_readers = NULL;
                        num_readers--;
                        return;
                }
        }
        /* Hrm, not found. Forgot to register? */
        assert(0);
}

void urcu_register_thread(void)
{
        internal_urcu_lock();
        urcu_add_reader(pthread_self());
        internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
        internal_urcu_lock();
        urcu_remove_reader(pthread_self());
        internal_urcu_unlock();
}
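
/*
 * Typical reader-side usage (a minimal sketch; struct foo, foo_ptr, use() and
 * the rcu_read_lock()/rcu_read_unlock()/rcu_dereference() primitives assumed
 * to come from urcu.h are illustrative and not part of this file):
 *
 *        void *reader_thread(void *arg)
 *        {
 *                struct foo *f;
 *                int i;
 *
 *                urcu_register_thread();        // once per reader thread
 *                for (i = 0; i < 1000; i++) {
 *                        rcu_read_lock();
 *                        f = rcu_dereference(foo_ptr);
 *                        if (f)
 *                                use(f->value);        // hypothetical consumer
 *                        rcu_read_unlock();        // do not keep f past this point
 *                }
 *                urcu_unregister_thread();        // before the thread exits
 *                return NULL;
 *        }
 */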

#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
        /*
         * Executing this smp_mb() is the only purpose of this signal handler.
         * It promotes barrier() into smp_mb() on every thread it is executed
         * on.
         */
        smp_mb();
        atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
        struct sigaction act;
        int ret;

        act.sa_sigaction = sigurcu_handler;
        act.sa_flags = SA_SIGINFO;        /* three-argument handler above */
        sigemptyset(&act.sa_mask);
        ret = sigaction(SIGURCU, &act, NULL);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
}

void __attribute__((destructor)) urcu_exit(void)
{
        struct sigaction act;
        int ret;

        ret = sigaction(SIGURCU, NULL, &act);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
        free(reader_data);
}
#endif