/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */
#ifndef _LTTNG_RING_BUFFER_FRONTEND_H
#define _LTTNG_RING_BUFFER_FRONTEND_H
#include <stddef.h>
#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#include "ust-helper.h"

/* Internal helpers */
#include "frontend_internal.h"
/* Buffer creation/removal and setup operations */

/*
 * channel_create - allocate a channel and its per-stream ring buffers.
 *
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
 * memory area for client-specific data. This memory is managed by lib
 * ring buffer. priv_data_align is the alignment required for the
 * private data area.
 *
 * Returns a shared-memory handle on success, NULL on error.
 */
extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
		const char *name,		/* NOTE(review): this and the next two */
		void *priv_data_init,		/* parameters restored from a mangled */
		size_t priv_data_align,
		size_t priv_data_size,
		void *buf_addr,			/* extraction — confirm against ring_buffer_frontend.c */
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval,
		const int *stream_fds, int nr_stream_fds,
		int64_t blocking_timeout);
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel.
 */
extern
void channel_destroy(struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int consumer);	/* NOTE(review): trailing "int consumer" restored from mangled extraction — confirm */
/* Buffer read operations */

/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)					\
	for_each_possible_cpu(cpu)
/*
 * Look up the per-cpu (or global) ring buffer of a channel, reporting its
 * shared-memory, wait, and wakeup file descriptors and mapping size.
 */
extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		int *shm_fd, int *wait_fd,
		int *wakeup_fd,		/* NOTE(review): restored from mangled extraction — confirm */
		uint64_t *memory_map_size);
/* Close the channel-level wait file descriptor. Returns 0 or a negative errno. */
extern
int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,	/* NOTE(review): restored from mangled extraction — confirm */
		struct lttng_ust_shm_handle *handle);
/* Close the channel-level wakeup file descriptor. Returns 0 or a negative errno. */
extern
int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,	/* NOTE(review): restored from mangled extraction — confirm */
		struct lttng_ust_shm_handle *handle);
/* Close the wait file descriptor of one per-cpu stream. Returns 0 or a negative errno. */
extern
int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);	/* NOTE(review): trailing "int cpu" restored from mangled extraction — confirm */
/* Close the wakeup file descriptor of one per-cpu stream. Returns 0 or a negative errno. */
extern
int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);	/* NOTE(review): trailing "int cpu" restored from mangled extraction — confirm */
/* Take a reader reference on a buffer. Returns 0 on success, negative on error. */
extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
/* Release the reader reference taken by lib_ring_buffer_open_read(). */
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
/*
 * Initialize signals for ring buffer. Should be called early e.g. by
 * main() in the program to affect all threads.
 */
void lib_ringbuffer_signal_init(void);
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 *
 * Snapshot the current consumed and produced positions of a buffer.
 * Returns 0 on success, negative on error.
 */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/*
 * Sample consumed and produced positions (variant of lib_ring_buffer_snapshot).
 * Returns 0 on success, negative on error.
 */
extern int lib_ring_buffer_snapshot_sample_positions(
		struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/* Advance the consumer position to consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed_new,
		struct lttng_ust_shm_handle *handle);
/* Get exclusive read access to the sub-buffer at position "consumed". Returns 0 or negative. */
extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed,
		struct lttng_ust_shm_handle *handle);
/* Release the sub-buffer acquired with lib_ring_buffer_get_subbuf(). */
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
153 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
154 * to read sub-buffers sequentially.
156 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
157 struct lttng_ust_shm_handle
*handle
)
161 ret
= lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
162 &buf
->prod_snapshot
, handle
);
165 ret
= lib_ring_buffer_get_subbuf(buf
, buf
->cons_snapshot
, handle
);
170 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
171 struct lttng_ust_shm_handle
*handle
)
173 struct channel
*chan
;
175 chan
= shmp(handle
, buf
->backend
.chan
);
178 lib_ring_buffer_put_subbuf(buf
, handle
);
179 lib_ring_buffer_move_consumer(buf
, subbuf_align(buf
->cons_snapshot
, chan
),
/* Reset a channel to its initial state (e.g. after a tracing session stop). */
extern void channel_reset(struct channel *chan);
/* Reset one ring buffer to its initial state. */
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
190 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config
*config
,
191 struct lttng_ust_lib_ring_buffer
*buf
)
193 return v_read(config
, &buf
->offset
);
197 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config
*config
,
198 struct lttng_ust_lib_ring_buffer
*buf
)
200 return uatomic_read(&buf
->consumed
);
204 * Must call lib_ring_buffer_is_finalized before reading counters (memory
205 * ordering enforced with respect to trace teardown).
208 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config
*config
,
209 struct lttng_ust_lib_ring_buffer
*buf
)
211 int finalized
= CMM_ACCESS_ONCE(buf
->finalized
);
213 * Read finalized before counters.
220 int lib_ring_buffer_channel_is_finalized(const struct channel
*chan
)
222 return chan
->finalized
;
226 int lib_ring_buffer_channel_is_disabled(const struct channel
*chan
)
228 return uatomic_read(&chan
->record_disabled
);
232 unsigned long lib_ring_buffer_get_read_data_size(
233 const struct lttng_ust_lib_ring_buffer_config
*config
,
234 struct lttng_ust_lib_ring_buffer
*buf
,
235 struct lttng_ust_shm_handle
*handle
)
237 return subbuffer_get_read_data_size(config
, &buf
->backend
, handle
);
241 unsigned long lib_ring_buffer_get_records_count(
242 const struct lttng_ust_lib_ring_buffer_config
*config
,
243 struct lttng_ust_lib_ring_buffer
*buf
)
245 return v_read(config
, &buf
->records_count
);
249 unsigned long lib_ring_buffer_get_records_overrun(
250 const struct lttng_ust_lib_ring_buffer_config
*config
,
251 struct lttng_ust_lib_ring_buffer
*buf
)
253 return v_read(config
, &buf
->records_overrun
);
257 unsigned long lib_ring_buffer_get_records_lost_full(
258 const struct lttng_ust_lib_ring_buffer_config
*config
,
259 struct lttng_ust_lib_ring_buffer
*buf
)
261 return v_read(config
, &buf
->records_lost_full
);
265 unsigned long lib_ring_buffer_get_records_lost_wrap(
266 const struct lttng_ust_lib_ring_buffer_config
*config
,
267 struct lttng_ust_lib_ring_buffer
*buf
)
269 return v_read(config
, &buf
->records_lost_wrap
);
273 unsigned long lib_ring_buffer_get_records_lost_big(
274 const struct lttng_ust_lib_ring_buffer_config
*config
,
275 struct lttng_ust_lib_ring_buffer
*buf
)
277 return v_read(config
, &buf
->records_lost_big
);
281 unsigned long lib_ring_buffer_get_records_read(
282 const struct lttng_ust_lib_ring_buffer_config
*config
,
283 struct lttng_ust_lib_ring_buffer
*buf
)
285 return v_read(config
, &buf
->backend
.records_read
);
#endif /* _LTTNG_RING_BUFFER_FRONTEND_H */