Commit | Line | Data |
---|---|---|
fdee2e6d MD |
1 | /* |
2 | * urcu-bp.c | |
3 | * | |
4 | * Userspace RCU library, "bulletproof" version. | |
5 | * | |
6982d6d7 | 6 | * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
fdee2e6d MD |
7 | * Copyright (c) 2009 Paul E. McKenney, IBM Corporation. |
8 | * | |
9 | * This library is free software; you can redistribute it and/or | |
10 | * modify it under the terms of the GNU Lesser General Public | |
11 | * License as published by the Free Software Foundation; either | |
12 | * version 2.1 of the License, or (at your option) any later version. | |
13 | * | |
14 | * This library is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * Lesser General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU Lesser General Public | |
20 | * License along with this library; if not, write to the Free Software | |
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
22 | * | |
23 | * IBM's contributions to this file may be relicensed under LGPLv2 or later. | |
24 | */ | |
25 | ||
0617bf4c | 26 | #define _GNU_SOURCE |
fdee2e6d MD |
27 | #include <stdio.h> |
28 | #include <pthread.h> | |
29 | #include <signal.h> | |
30 | #include <assert.h> | |
31 | #include <stdlib.h> | |
32 | #include <string.h> | |
33 | #include <errno.h> | |
34 | #include <poll.h> | |
35 | #include <unistd.h> | |
36 | #include <sys/mman.h> | |
37 | ||
57760d44 | 38 | #include "urcu/map/urcu-bp.h" |
5e77fc1f | 39 | |
af7c2dbe | 40 | #include "urcu/static/urcu-bp.h" |
fdee2e6d MD |
41 | /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */ |
42 | #include "urcu-bp.h" | |
43 | ||
4c1ae2ea MD |
44 | #ifndef MAP_ANONYMOUS |
45 | #define MAP_ANONYMOUS MAP_ANON | |
46 | #endif | |
47 | ||
fdee2e6d MD |
/* Sleep delay in us */
#define RCU_SLEEP_DELAY 1000
/* Initial arena allocation, in bytes; the arena grows by doubling. */
#define ARENA_INIT_ALLOC 16

/* Library destructor: releases the reader registry arena at unload. */
void __attribute__((destructor)) rcu_bp_exit(void);

/* Serializes grace periods, registry mutation and fork handling. */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long rcu_gp_ctr = RCU_GP_COUNT;

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
struct rcu_reader __thread *rcu_reader;

/* List of all currently registered reader threads. */
static CDS_LIST_HEAD(registry);

/*
 * mmap'd arena backing the per-thread struct rcu_reader slots.
 * Grown (never shrunk) by resize_arena(); slots handed out by add_thread().
 */
struct registry_arena {
	void *p;	/* base of the mapping; NULL until first allocation */
	size_t len;	/* mapped length, in bytes */
	size_t used;	/* bytes consumed by allocated reader slots */
};

static struct registry_arena registry_arena;

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

/* Forward declaration: reaps registry slots of exited threads. */
static void rcu_gc_registry(void);
89 | ||
/*
 * Acquire @mutex; any locking failure is fatal (message + exit).
 * With DISTRUST_SIGNALS_EXTREME, spin on trylock with a backoff instead
 * of blocking, so a signal delivered while waiting cannot wedge us
 * inside pthread_mutex_lock().
 */
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		/* EBUSY/EINTR just mean "retry"; anything else is fatal. */
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		/* Back off 10ms before retrying the trylock. */
		poll(NULL,0,10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
111 | ||
/*
 * Release @mutex; a failing unlock is fatal, matching the error
 * policy of mutex_lock().
 */
static void mutex_unlock(pthread_mutex_t *mutex)
{
	if (pthread_mutex_unlock(mutex) != 0) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
122 | ||
/*
 * Flip the grace period parity and wait until no reader is still
 * running inside the previous parity. Called twice per synchronize_rcu().
 * Must be called with rcu_gp_lock held.
 */
void update_counter_and_wait(void)
{
	CDS_LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for each thread rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		/* Move readers already quiescent onto a private list. */
		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
			if (!rcu_old_gp_ongoing(&index->ctr))
				cds_list_move(&index->node, &qsreaders);
		}

		if (cds_list_empty(&registry)) {
			break;
		} else {
			/*
			 * NOTE(review): '==' sleeps only on the exact
			 * RCU_QS_ACTIVE_ATTEMPTS-th loop; subsequent
			 * iterations busy-spin again — confirm whether
			 * '>=' was intended.
			 */
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				usleep(RCU_SLEEP_DELAY);
			else
				caa_cpu_relax();
		}
	}
	/* put back the reader list in the registry */
	cds_list_splice(&qsreaders, &registry);
}
168 | ||
/*
 * Wait for a full grace period: every reader critical section that was
 * in progress when we were called is guaranteed finished on return.
 * Signals are blocked for the whole duration so a signal handler that
 * registers the current thread cannot deadlock on rcu_gp_lock.
 */
void synchronize_rcu(void)
{
	sigset_t newmask, oldmask;
	int ret;

	/* Block all signals while we hold rcu_gp_lock. */
	ret = sigemptyset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	/* No registered readers: the grace period is trivially complete. */
	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	cmm_smp_mb();

	/* Remove old registry elements */
	rcu_gc_registry();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr being
	 * freed.
	 */
	cmm_smp_mb();
out:
	mutex_unlock(&rcu_gp_lock);
	/* Restore the caller's signal mask. */
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
219 | ||
220 | /* | |
221 | * library wrappers to be used by non-LGPL compatible source code. | |
222 | */ | |
223 | ||
/* LGPL wrapper: enter an RCU read-side critical section. */
void rcu_read_lock(void)
{
	_rcu_read_lock();
}
228 | ||
/* LGPL wrapper: leave an RCU read-side critical section. */
void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}
233 | ||
234 | /* | |
235 | * only grow for now. | |
236 | */ | |
237 | static void resize_arena(struct registry_arena *arena, size_t len) | |
238 | { | |
239 | void *new_arena; | |
240 | ||
0617bf4c MD |
241 | if (!arena->p) |
242 | new_arena = mmap(arena->p, len, | |
243 | PROT_READ | PROT_WRITE, | |
244 | MAP_ANONYMOUS | MAP_PRIVATE, | |
245 | -1, 0); | |
246 | else | |
247 | new_arena = mremap(arena->p, arena->len, | |
248 | len, MREMAP_MAYMOVE); | |
249 | assert(new_arena != MAP_FAILED); | |
250 | ||
fdee2e6d MD |
251 | /* |
252 | * re-used the same region ? | |
253 | */ | |
254 | if (new_arena == arena->p) | |
255 | return; | |
256 | ||
257 | memcpy(new_arena, arena->p, arena->len); | |
258 | bzero(new_arena + arena->len, len - arena->len); | |
259 | arena->p = new_arena; | |
260 | } | |
261 | ||
/* Called with signals off and mutex locked */
static void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;

	/* Grow the arena (doubling) when there is no room for one more slot. */
	if (registry_arena.len
	    < registry_arena.used + sizeof(struct rcu_reader))
		resize_arena(&registry_arena,
			max(registry_arena.len << 1, ARENA_INIT_ALLOC));
	/*
	 * Find a free spot.
	 */
	for (rcu_reader_reg = registry_arena.p;
	     (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
	     rcu_reader_reg++) {
		if (!rcu_reader_reg->alloc)
			break;
	}
	/*
	 * NOTE(review): assumes the scan always finds a free slot, i.e.
	 * that the resize above guarantees len >= used + sizeof(*reg) —
	 * otherwise rcu_reader_reg points one past the end here. Verify.
	 */
	rcu_reader_reg->alloc = 1;
	registry_arena.used += sizeof(struct rcu_reader);

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/* Publish the slot as this thread's fast-path reader structure. */
	rcu_reader = rcu_reader_reg;
}
289 | ||
/* Called with signals off and mutex locked */
static void rcu_gc_registry(void)
{
	struct rcu_reader *rcu_reader_reg;
	pthread_t tid;
	int ret;

	/* Scan all allocated slots; reap those whose thread has exited. */
	for (rcu_reader_reg = registry_arena.p;
	     (void *)rcu_reader_reg < registry_arena.p + registry_arena.len;
	     rcu_reader_reg++) {
		if (!rcu_reader_reg->alloc)
			continue;
		tid = rcu_reader_reg->tid;
		/* Signal 0: existence probe only, nothing is delivered. */
		ret = pthread_kill(tid, 0);
		assert(ret != EINVAL);
		if (ret == ESRCH) {
			/* Thread is gone: unlink and recycle its slot. */
			cds_list_del(&rcu_reader_reg->node);
			rcu_reader_reg->ctr = 0;
			rcu_reader_reg->alloc = 0;
			registry_arena.used -= sizeof(struct rcu_reader);
		}
	}
}
313 | ||
/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	/* Block all signals: a handler could itself trigger registration. */
	ret = sigemptyset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
	assert(!ret);

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock(). */
	if (rcu_reader)
		goto end;

	mutex_lock(&rcu_gp_lock);
	add_thread();
	mutex_unlock(&rcu_gp_lock);
end:
	/* Restore the caller's signal mask. */
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
338 | ||
02be5561 | 339 | void rcu_bp_exit() |
fdee2e6d MD |
340 | { |
341 | munmap(registry_arena.p, registry_arena.len); | |
342 | } | |
4cf1675f MD |
343 | |
/*
 * Holding the rcu_gp_lock across fork ensures fork() does not race with
 * a concurrent thread executing with this same lock held. This ensures that the
 * registry is in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	/* Block all signals before taking the lock (see rcu_bp_register()). */
	ret = sigemptyset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	/* Remember the pre-fork mask; restored by the after-fork handlers. */
	saved_fork_signal_mask = oldmask;
}
361 | ||
/* Parent side after fork: release the lock, then restore the signal mask. */
void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
372 | ||
/*
 * Child side after fork: only the forking thread survives in the child,
 * so reap every other reader's registry slot before releasing the lock
 * and restoring the signal mask.
 */
void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	rcu_gc_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
5e77fc1f PM |
384 | |
385 | #include "urcu-call-rcu-impl.h" | |
0376e7b2 | 386 | #include "urcu-defer-impl.h" |