/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a nesting count of 1 (RCU_GP_COUNT) in its low-order bits, to
 * accelerate the reader fast path.
 * Written to only by the writer with the mutex taken. Read by both writer and
 * readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;

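/*
 * For reference, a sketch of how the read-side fast path in urcu.h is
 * expected to use the two counters above (illustrative only, not part of
 * this file; RCU_GP_CTR_NEST_MASK is assumed to mask the low-order nesting
 * count, as in the matching urcu.h):
 */
#if 0
static inline void example_rcu_read_lock(void)
{
	long tmp = urcu_active_readers;

	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		/* Outermost nesting: snapshot parity + count of 1. */
		urcu_active_readers = urcu_gp_ctr;
	else
		/* Nested: just increment the low-order count. */
		urcu_active_readers = tmp + RCU_GP_COUNT;
	barrier();	/* promoted to smp_mb() by the writer's signal */
}

static inline void example_rcu_read_unlock(void)
{
	barrier();	/* promoted to smp_mb() by the writer's signal */
	urcu_active_readers -= RCU_GP_COUNT;
}
#endif
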
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	long *urcu_active_readers;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
#ifndef DEBUG_FULL_MB
static int sig_done;
#endif

void internal_urcu_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 * Toggles RCU_GP_CTR_BIT while preserving the low-order count of 1, so
 * readers entering after the flip snapshot the new parity while readers
 * still in a critical section keep showing the old one.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

#ifdef DEBUG_FULL_MB
static void force_mb_single_thread(pthread_t tid)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else

static void force_mb_single_thread(pthread_t tid)
{
	assert(reader_data);
	sig_done = 0;
	/*
	 * pthread_kill implies a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 * smp_mb(); write sig_done before sending the signals
	 */
	smp_mc();	/* write sig_done before sending the signals */
	pthread_kill(tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < 1)
		cpu_relax();
	smp_mb();	/* read sig_done before ending the barrier */
}

static void force_mb_all_threads(void)
{
	struct reader_data *index;
	/*
	 * Ask each thread to execute a smp_mb(), so we can consider the
	 * compiler barriers around the rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	/*
	 * pthread_kill implies a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 * smp_mb(); write sig_done before sending the signals
	 */
	smp_mc();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < num_readers)
		cpu_relax();
	smp_mb();	/* read sig_done before ending the barrier */
}
#endif

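/*
 * Note on the barrier scheme: the read-side primitives use only compiler
 * barriers, so a writer needing real memory ordering sends SIGURCU to every
 * registered reader and waits in the busy-loops above; each sigurcu_handler
 * executes the smp_mb() on the reader's behalf. With DEBUG_FULL_MB, smp_mb()
 * is issued directly in the read-side primitives instead, trading read-side
 * speed for a much simpler writer.
 */
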
void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index->tid);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
	}
}

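/*
 * For reference, rcu_old_gp_ongoing() (defined in urcu.h) is expected to
 * check whether a reader is inside a read-side critical section that started
 * before the last parity flip; a sketch of that test (illustrative only,
 * not part of this file):
 */
#if 0
static inline int example_rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;	/* no registered reader in this slot */
	v = LOAD_SHARED(*value);
	/* Reader is nested in a critical section AND shows the old parity. */
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}
#endif
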
void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing the data structure
	 * pointed to by the new ptr. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always
	 * accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}

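/*
 * Illustrative writer-side usage (sketch, not part of this file; "mydata",
 * "shared_ptr" and "example_update" are hypothetical names). A real writer
 * would publish the new pointer with a primitive providing a write barrier
 * (an rcu_assign_pointer/xchg-style helper) so readers cannot observe a
 * partially initialized structure:
 */
#if 0
struct mydata {
	int value;
};
static struct mydata *shared_ptr;

static void example_update(int value)
{
	struct mydata *new, *old;

	new = malloc(sizeof(*new));
	new->value = value;
	old = shared_ptr;
	shared_ptr = new;	/* publish (see barrier caveat above) */
	synchronize_rcu();	/* wait for all pre-existing readers */
	free(old);		/* now safe: no reader can hold the old copy */
}
#endif
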
void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm, not found. Did the thread forget to register? */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}

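/*
 * Illustrative reader-side usage (sketch, not part of this file; reuses the
 * hypothetical "mydata"/"shared_ptr" from the writer sketch above, and
 * assumes rcu_read_lock()/rcu_read_unlock()/rcu_dereference() from urcu.h):
 */
#if 0
static void *example_reader(void *arg)
{
	struct mydata *p;
	int v;

	urcu_register_thread();	/* register before the first read lock */
	rcu_read_lock();
	p = rcu_dereference(shared_ptr);
	if (p)
		v = p->value;	/* safe until rcu_read_unlock() */
	rcu_read_unlock();
	urcu_unregister_thread();
	return NULL;
}
#endif
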
#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It momentarily promotes the read-side barrier() into a smp_mb() on
	 * every thread it is executed on.
	 */
	smp_mb();
	atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* needed to deliver via sa_sigaction */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(reader_data);
}
#endif