/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _BSD_SOURCE
#define _LGPL_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <poll.h>

#include <urcu/config.h>
#include <urcu/assert.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu.h>
#include <urcu/static/urcu.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu.h>
#define _LGPL_SOURCE

/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS	10

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100

/* If the headers do not support membarrier system call, fall back on RCU_MB */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

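/*
 * Commands for the membarrier(2) system call, mirrored here so the
 * library builds even when the kernel headers in use do not define
 * them.
 */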
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

#ifdef RCU_MEMBARRIER
static int init_done;
static int urcu_memb_has_sys_membarrier_private_expedited;

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
/*
 * Explicitly initialize to zero because we can't alias a non-static
 * uninitialized variable.
 */
int urcu_memb_has_sys_membarrier = 0;
#endif

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);

static DEFINE_URCU_TLS(int, rcu_signal_was_blocked);
#endif

void __attribute__((destructor)) rcu_exit(void);
static void urcu_call_rcu_exit(void);

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the entire duration of the grace period
 * wait: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct urcu_gp rcu_gp = { .ctr = URCU_GP_COUNT };

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_reader, rcu_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

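/*
 * Mutex helpers: abort the process (through urcu_die()) on any locking
 * error. Under DISTRUST_SIGNALS_EXTREME, mutex_lock() spins on trylock
 * so it can service deferred memory-barrier requests
 * (rcu_reader.need_mb) while waiting for the mutex.
 */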
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

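/*
 * smp_mb_master() issues a memory barrier on behalf of the writer that
 * is also guaranteed to be observed by all registered reader threads.
 * Each flavor provides its own implementation below: sys_membarrier(),
 * a plain cmm_smp_mb(), or signal-based forced barriers.
 */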
#ifdef RCU_MEMBARRIER
static void smp_mb_master(void)
{
	if (caa_likely(urcu_memb_has_sys_membarrier)) {
		if (membarrier(urcu_memb_has_sys_membarrier_private_expedited ?
				MEMBARRIER_CMD_PRIVATE_EXPEDITED :
				MEMBARRIER_CMD_SHARED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
#endif

#ifdef RCU_MB
static void smp_mb_master(void)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct urcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and don't assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			(void) poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(void)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 * Always called with rcu_registry lock held. Releases this lock and
 * grabs it again. Holds the lock when it returns.
 */
static void wait_gp(void)
{
	/*
	 * Read reader_gp before read futex. smp_mb_master() needs to
	 * be called with the rcu registry lock held in RCU_SIGNAL
	 * flavor.
	 */
	smp_mb_master();
	/* Temporarily unlock the registry lock. */
	mutex_unlock(&rcu_registry_lock);
	while (uatomic_read(&rcu_gp.futex) == -1) {
		if (!futex_async(&rcu_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			goto end;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
end:
	/*
	 * Re-lock the registry lock before the next loop.
	 */
	mutex_lock(&rcu_registry_lock);
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master();
		}

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_common_reader_state(&rcu_gp, &index->ctr)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master();
				wait_gp_loops = 0;
			}
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
				wait_gp_loops++;
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

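/*
 * synchronize_rcu() waits for a grace period to elapse: every reader
 * critical section that was in progress when it is called is
 * guaranteed to have completed before it returns. Concurrent callers
 * queue up on gp_waiters so that a single thread performs the
 * two-phase parity flip on behalf of the whole batch.
 */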
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourself to the gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourself up. */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_registry_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}

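/*
 * Illustrative reader-side usage of this flavor (not part of the
 * library itself; shared_ptr and use() are placeholder names): each
 * reader thread registers once, then brackets its accesses with
 * rcu_read_lock()/rcu_read_unlock(), while updaters publish with
 * rcu_assign_pointer() and call synchronize_rcu() before freeing old
 * data:
 *
 *	rcu_register_thread();
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();
 */
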
#ifdef RCU_SIGNAL
/*
 * Make sure the signal used by the urcu-signal flavor is unblocked
 * while the thread is registered.
 */
static
void urcu_signal_unblock(void)
{
	sigset_t mask, oldmask;
	int ret;

	ret = sigemptyset(&mask);
	urcu_posix_assert(!ret);
	ret = sigaddset(&mask, SIGRCU);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_UNBLOCK, &mask, &oldmask);
	urcu_posix_assert(!ret);
	URCU_TLS(rcu_signal_was_blocked) = sigismember(&oldmask, SIGRCU);
}

static
void urcu_signal_restore(void)
{
	sigset_t mask;
	int ret;

	if (!URCU_TLS(rcu_signal_was_blocked))
		return;
	ret = sigemptyset(&mask);
	urcu_posix_assert(!ret);
	ret = sigaddset(&mask, SIGRCU);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
	urcu_posix_assert(!ret);
}
#else
static
void urcu_signal_unblock(void) { }
static
void urcu_signal_restore(void) { }
#endif

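/*
 * Each reader thread must call rcu_register_thread() before its first
 * use of rcu_read_lock(), and rcu_unregister_thread() before exiting,
 * so that synchronize_rcu() can track it through the registry.
 */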
void rcu_register_thread(void)
{
	urcu_signal_unblock();

	URCU_TLS(rcu_reader).tid = pthread_self();
	urcu_posix_assert(URCU_TLS(rcu_reader).need_mb == 0);
	urcu_posix_assert(!(URCU_TLS(rcu_reader).ctr & URCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	urcu_posix_assert(!URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 1;
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	urcu_posix_assert(URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 0;
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);

	urcu_signal_restore();
}

#ifdef RCU_MEMBARRIER

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_memb_has_sys_membarrier = 1;
}
#endif

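/*
 * Query the kernel for membarrier(2) support. Prefer the
 * PRIVATE_EXPEDITED command (registering the process for it) and fall
 * back to the SHARED command; rcu_sys_membarrier_status() then records
 * whether sys_membarrier can be used, or aborts when it is mandatory
 * (CONFIG_RCU_FORCE_SYS_MEMBARRIER) but unavailable.
 */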
static
void rcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			urcu_memb_has_sys_membarrier_private_expedited = 1;
			available = true;
		} else if (mask & MEMBARRIER_CMD_SHARED) {
			available = true;
		}
	}
	rcu_sys_membarrier_status(available);
}

void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	rcu_sys_membarrier_init();
}
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo __attribute__((unused)),
		siginfo_t *siginfo __attribute__((unused)),
		void *context __attribute__((unused)))
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes cmm_barrier() into cmm_smp_mb() on every thread
	 * it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}

/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}

/*
 * Don't unregister the SIGRCU signal handler anymore, because
 * call_rcu threads could still be using it shortly before the
 * application exits.
 * Assertion disabled because call_rcu threads are now rcu
 * readers, and left running at exit.
 * urcu_posix_assert(cds_list_empty(&registry));
 */

#endif /* #ifdef RCU_SIGNAL */

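/*
 * Library destructor: invoke the call_rcu teardown provided by
 * urcu-call-rcu-impl.h (included below).
 */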
void rcu_exit(void)
{
	urcu_call_rcu_exit();
}

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"