/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems which do not allow MREMAP_MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
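
/*
 * Note: since the non-Linux mremap_wrapper() above always fails,
 * expand_arena() below ends up mmap()ing a fresh, larger chunk on those
 * systems instead of growing the last chunk in place.
 */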

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_READER_COUNT	8

/*
 * Number of active busy-wait attempts to check for reader quiescent
 * state before sleeping.
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100

static
int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

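/*
 * Subset of the membarrier command values defined by the Linux kernel
 * UAPI (<linux/membarrier.h>), duplicated here so the library also
 * builds against older kernel headers.
 */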
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void _lttng_ust_urcu_init(void)
	__attribute__((constructor));
static
void lttng_ust_urcu_exit(void)
	__attribute__((destructor));

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held through the entire wait for the grace period to
 * complete: it is sporadically released between iterations over the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

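/*
 * Global grace-period state. ctr holds LTTNG_UST_URCU_GP_COUNT plus the
 * current LTTNG_UST_URCU_GP_CTR_PHASE bit; readers snapshot it into
 * their per-thread counter on the outermost read-lock, and
 * synchronize_rcu() flips the phase bit between grace-period phases.
 */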
struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t capacity;		/* capacity of this chunk (in elements) */
	size_t used;			/* count of elements used */
	struct cds_list_head node;	/* chunk_list node */
	struct lttng_ust_urcu_reader readers[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

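/*
 * Under DISTRUST_SIGNALS_EXTREME, mutex_lock() below avoids blocking in
 * pthread_mutex_lock(): it busy-waits with trylock and short sleeps,
 * tolerating implementations where a signal can cause trylock to return
 * EINTR.
 */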
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

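/*
 * Issue a memory barrier acting on all threads of the process: through
 * the private expedited membarrier command when available, falling back
 * on a local cmm_smp_mb() otherwise. Used by the grace-period writer to
 * pair with the barriers of the reader fast path.
 */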
static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}

/* Get the size of a chunk's allocation from its capacity (an element count). */
static size_t chunk_allocation_size(size_t capacity)
{
	return (capacity * sizeof(struct lttng_ust_urcu_reader)) +
		sizeof(struct registry_chunk);
}

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

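/*
 * Writer-side grace period. Flips the grace-period phase bit and waits,
 * in two passes, until every registered reader is either quiescent or
 * has observed the new phase. Readers never block in this flavor, so
 * the whole waiting burden falls on this function.
 */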
void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new pointer. Write the new pointer
	 * before changing the qparity.
	 */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old pointer
	 * be freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Library wrappers to be used by non-LGPL-compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}

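/*
 * Reader-side usage sketch (hypothetical caller: p, gp and
 * reader_visit() are illustration only; the dereference macro is
 * assumed from <lttng/urcu/pointer.h>):
 *
 *	lttng_ust_urcu_register_thread();
 *	lttng_ust_urcu_read_lock();
 *	p = lttng_ust_rcu_dereference(gp);
 *	if (p)
 *		reader_visit(p);
 *	lttng_ust_urcu_read_unlock();
 */
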
/*
 * Only grow for now. If empty, allocate an INIT_READER_COUNT-element
 * chunk. Else, try expanding the last chunk. If this fails, allocate a
 * new chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_size_bytes, new_chunk_size_bytes, new_capacity;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		new_chunk_size_bytes = chunk_allocation_size(INIT_READER_COUNT);
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_size_bytes,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_size_bytes);
		new_chunk->capacity = INIT_READER_COUNT;
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_size_bytes = chunk_allocation_size(last_chunk->capacity);
	new_capacity = last_chunk->capacity << 1;
	new_chunk_size_bytes = chunk_allocation_size(new_capacity);

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_size_bytes,
		new_chunk_size_bytes, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_size_bytes, 0,
			new_chunk_size_bytes - old_chunk_size_bytes);
		last_chunk->capacity = new_capacity;
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_size_bytes,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_size_bytes);
	new_chunk->capacity = new_capacity;
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

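/*
 * Chunk capacities thus grow geometrically: 8, 16, 32, 64, ... elements.
 * Whether a doubling extends the last chunk in place or starts a new
 * chunk depends on mremap_wrapper() succeeding without MREMAP_MAYMOVE.
 */
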
static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	int expand_done = 0;	/* Only allow one expand per allocation */

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		size_t spot_idx;

		/* Skip fully used chunks. */
		if (chunk->used == chunk->capacity) {
			continue;
		}

		/* Find a spot. */
		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			if (!chunk->readers[spot_idx].alloc) {
				chunk->readers[spot_idx].alloc = 1;
				chunk->used++;
				return &chunk->readers[spot_idx];
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and rcu_registry_lock held */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads point into the reader registry; this is why its
	 * memory must never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with rcu_registry_lock held */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used--;
}

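/*
 * Find the chunk whose readers[] array contains the given reader
 * registration, by checking the pointer against each chunk's element
 * range.
 */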
static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->readers[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->readers[chunk->capacity])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and rcu_registry_lock held */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

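/*
 * Registration can be triggered from the read-side fast path, possibly
 * from within a signal handler. All signals are therefore blocked for
 * the duration of registration, so a nested handler cannot re-enter the
 * registry code while its locks are held.
 */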
/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove the thread from the registry when it exits. Invoked as the
 * pthread key destructor, so it runs automatically at thread exit.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

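/*
 * Query the kernel for private expedited membarrier support. The
 * process must issue MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED once
 * before MEMBARRIER_CMD_PRIVATE_EXPEDITED can be used, hence the
 * registration at init time.
 */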
static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk_allocation_size(chunk->capacity));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork ensures that
 * fork() does not race with a concurrent thread holding either of those
 * locks. The registry and the data protected by rcu_gp_lock are
 * therefore in a coherent state in the child.
 */
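/*
 * A typical setup (hypothetical, not performed by this file) wires these
 * three hooks to pthread_atfork():
 *
 *	pthread_atfork(lttng_ust_urcu_before_fork,
 *		lttng_ust_urcu_after_fork_parent,
 *		lttng_ust_urcu_after_fork_child);
 */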
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all registry entries except our own thread, matching Linux
 * fork() behavior: only the calling thread exists in the child.
 * Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		size_t spot_idx;

		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			struct lttng_ust_urcu_reader *reader = &chunk->readers[spot_idx];

			if (!reader->alloc)
				continue;
			if (reader->tid == pthread_self())
				continue;
			cleanup_thread(chunk, reader);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}