/*
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS		MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC				\
	(sizeof(struct registry_chunk)			\
	+ INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader))
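
/*
 * Sizing sketch (illustrative only; the real numbers are
 * architecture-dependent): if sizeof(struct registry_chunk) were 32
 * bytes and sizeof(struct lttng_ust_urcu_reader) were 64 bytes, both
 * hypothetical values, ARENA_INIT_ALLOC would come out to
 * 32 + 8 * 64 = 544 bytes: one chunk header followed by room for
 * INIT_NR_THREADS reader slots.
 */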

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void __attribute__((constructor)) _lttng_ust_urcu_init(void);
static
void __attribute__((destructor)) lttng_ust_urcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held continuously while awaiting grace-period
 * completion: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
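
/*
 * Counter encoding note (see the static inlines in
 * <lttng/urcu/static/urcu-ust.h>): lttng_ust_urcu_gp.ctr carries a
 * parity bit (LTTNG_UST_URCU_GP_CTR_PHASE) that the writer flips once
 * per grace period, while the low-order bits count read-side critical
 * section nesting in each reader's per-thread copy. A reader is
 * quiescent when its nesting count is zero, or when the parity of its
 * snapshot matches the current global parity.
 */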

/*
 * Pointer to registry elements. Written only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];			/* reader registry data */
};
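
/*
 * Chunk memory layout (data[] is a flexible array member, so reader
 * slots follow the header directly):
 *
 *   [ registry_chunk header | reader slot 0 | ... | reader slot N-1 ]
 *                             ^ &data[0]            up to &data[data_len]
 */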

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}
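
/*
 * Design note: smp_mb_master() is what lets read-side critical sections
 * stay cheap. When membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED) is
 * available, the writer forces a full barrier on every running thread
 * of the process, so the matching read-side helpers (static inlines in
 * <lttng/urcu/static/urcu-ust.h>) can degrade to simple compiler
 * barriers; otherwise both sides fall back to real memory barriers
 * (cmm_smp_mb()).
 */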

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
		struct cds_list_head *cur_snap_readers,
		struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * lttng_ust_urcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
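
/*
 * Summary of the per-reader decision above:
 *
 *   reader state           action
 *   ---------------------  --------------------------------------------
 *   ACTIVE_CURRENT         move to cur_snap_readers if provided,
 *                          else treat as quiescent (move to qsreaders)
 *   INACTIVE               move to qsreaders
 *   ACTIVE_OLD             leave in input_readers and keep polling
 *
 * The two-list scheme is what lets synchronize_rcu() implement its
 * two-phase (parity flip) wait without ever blocking readers.
 */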

void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing the data structure
	 * where the new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr,
		lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
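
/*
 * Illustrative write-side sketch (hypothetical caller, not part of this
 * file), assuming the lttng_ust_rcu_xchg_pointer() helper from
 * <lttng/urcu/pointer.h>: publish a new version, wait for a grace
 * period, then reclaim the old one.
 *
 *	new = make_mydata();
 *	old = lttng_ust_rcu_xchg_pointer(&global_ptr, new);
 *	lttng_ust_urcu_synchronize_rcu();
 *	free(old);	(safe: no reader can still reference it)
 */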

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}
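
/*
 * Illustrative read-side sketch (hypothetical caller, not part of this
 * file), assuming the lttng_ust_rcu_dereference() helper from
 * <lttng/urcu/pointer.h>. Registration is lazy: the read-lock fast path
 * registers the calling thread on first use, so the explicit
 * lttng_ust_urcu_register_thread() call below is optional but keeps the
 * registration cost out of the first critical section.
 *
 *	lttng_ust_urcu_register_thread();
 *	...
 *	lttng_ust_urcu_read_lock();
 *	p = lttng_ust_rcu_dereference(global_ptr);
 *	if (p)
 *		use(p);
 *	lttng_ust_urcu_read_unlock();
 */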

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized
 * chunk. Else, try expanding the last chunk. If this fails, allocate a
 * new chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct lttng_ust_urcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct lttng_ust_urcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct lttng_ust_urcu_reader);
}

static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}
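
/*
 * The query/register dance above follows the membarrier(2) protocol:
 * MEMBARRIER_CMD_QUERY returns a bitmask of supported commands, and
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED may only be used after a successful
 * MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED for the process. On kernels
 * without the system call, the membarrier() macro above evaluates to
 * -ENOSYS and the smp_mb fallback is kept.
 */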

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork makes sure
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and data protected
 * by rcu_gp_lock are in a coherent state in the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
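
/*
 * Illustrative fork-integration sketch (hypothetical caller, not part
 * of this file): an application using this library directly across
 * fork() would wire the three handlers above with pthread_atfork(3):
 *
 *	ret = pthread_atfork(lttng_ust_urcu_before_fork,
 *			lttng_ust_urcu_after_fork_parent,
 *			lttng_ust_urcu_after_fork_child);
 *	if (ret)
 *		abort();
 */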