2 * SPDX-License-Identifier: LGPL-2.1-only
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 * Ring Buffer Library Synchronization Header (API).
8 * See ring_buffer_frontend.c for more information on wait-free algorithms.
11 #ifndef _LTTNG_RING_BUFFER_FRONTEND_H
12 #define _LTTNG_RING_BUFFER_FRONTEND_H
17 #include <urcu/compiler.h>
18 #include <urcu/uatomic.h>
21 /* Internal helpers */
22 #include "frontend_internal.h"
24 /* Buffer creation/removal and setup operations */
27 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
28 * padding to let readers get those sub-buffers. Used for live streaming.
30 * read_timer_interval is the time interval (in us) to wake up pending readers.
32 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
33 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
34 * be set to NULL for other backends.
36 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
37 * memory area for client-specific data. This memory is managed by lib
38 * ring buffer. priv_data_align is the alignment required for the
/*
 * Create a channel with its per-stream ring buffers; returns the shm
 * handle owning the channel, or NULL on error.
 *
 * NOTE(review): the extracted text was garbled and several parameter
 * lines were missing (name, priv_data, priv_data_init, buf_addr);
 * they were restored from the upstream lttng-ust declaration -- confirm
 * against the definition in ring_buffer_frontend.c.
 */
extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
				const char *name,
				void **priv_data,
				size_t priv_data_align,
				size_t priv_data_size,
				void *priv_data_init,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				const int *stream_fds, int nr_stream_fds,
				int64_t blocking_timeout);
57 * channel_destroy finalizes all channel's buffers, waits for readers to
58 * release all references, and destroys the channel.
/*
 * NOTE(review): trailing "int consumer" parameter restored from the
 * upstream declaration -- the extracted text was truncated; confirm
 * against ring_buffer_frontend.c.
 */
extern
void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer);
65 /* Buffer read operations */
68 * Iteration on channel cpumask needs to issue a read barrier to match the write
69 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
70 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
71 * only performed at channel destruction.
/* chan is unused: per-cpu buffers exist for every possible cpu. */
#define for_each_channel_cpu(cpu, chan)		\
	for_each_possible_cpu(cpu)
/*
 * Get the per-cpu (or global) ring buffer of a channel, along with its
 * shm/wait/wakeup file descriptors and mapping size (outputs).
 * NOTE(review): "int *wakeup_fd" parameter restored from the upstream
 * declaration -- the extracted text skipped that line; confirm against
 * ring_buffer_frontend.c.
 */
extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct lttng_ust_shm_handle *handle,
				int *shm_fd, int *wait_fd,
				int *wakeup_fd,
				uint64_t *memory_map_size);
/*
 * NOTE(review): "struct channel *chan" parameter restored from the
 * upstream declaration -- the extracted text skipped that line.
 */
extern int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle);
/*
 * NOTE(review): "struct channel *chan" parameter restored from the
 * upstream declaration -- the extracted text skipped that line.
 */
extern int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle);
/*
 * NOTE(review): "struct channel *chan" and trailing "int cpu" parameters
 * restored from the upstream declaration -- the extracted text was
 * truncated; confirm against ring_buffer_frontend.c.
 */
extern int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
/*
 * NOTE(review): "struct channel *chan" and trailing "int cpu" parameters
 * restored from the upstream declaration -- the extracted text was
 * truncated; confirm against ring_buffer_frontend.c.
 */
extern int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu);
/* Acquire a read-side reference on a buffer. */
extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
/* Release a read-side reference taken with lib_ring_buffer_open_read. */
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
/*
 * Initialize signals for ring buffer. Should be called early e.g. by
 * main() in the program to affect all threads.
 */
void lib_ringbuffer_signal_init(void);
114 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
/* Snapshot consumed/produced positions (outputs) of a buffer. */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/* Sample consumed/produced positions (outputs) of a buffer. */
extern int lib_ring_buffer_snapshot_sample_positions(
		struct lttng_ust_lib_ring_buffer *buf,
		unsigned long *consumed,
		unsigned long *produced,
		struct lttng_ust_shm_handle *handle);
/* Advance the consumer position to consumed_new. */
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed_new,
		struct lttng_ust_shm_handle *handle);
/* Get exclusive read access to the sub-buffer at position "consumed". */
extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		unsigned long consumed,
		struct lttng_ust_shm_handle *handle);
/* Release exclusive sub-buffer access taken with get_subbuf. */
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
136 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
137 * to read sub-buffers sequentially.
139 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
140 struct lttng_ust_shm_handle
*handle
)
144 ret
= lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
145 &buf
->prod_snapshot
, handle
);
148 ret
= lib_ring_buffer_get_subbuf(buf
, buf
->cons_snapshot
, handle
);
153 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
154 struct lttng_ust_shm_handle
*handle
)
156 struct channel
*chan
;
158 chan
= shmp(handle
, buf
->backend
.chan
);
161 lib_ring_buffer_put_subbuf(buf
, handle
);
162 lib_ring_buffer_move_consumer(buf
, subbuf_align(buf
->cons_snapshot
, chan
),
/* Reset a channel to its initial state (drops unconsumed data). */
extern void channel_reset(struct channel *chan);
/* Reset a single ring buffer to its initial state. */
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle);
171 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config
*config
,
172 struct lttng_ust_lib_ring_buffer
*buf
)
174 return v_read(config
, &buf
->offset
);
178 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config
*config
,
179 struct lttng_ust_lib_ring_buffer
*buf
)
181 return uatomic_read(&buf
->consumed
);
185 * Must call lib_ring_buffer_is_finalized before reading counters (memory
186 * ordering enforced with respect to trace teardown).
189 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config
*config
,
190 struct lttng_ust_lib_ring_buffer
*buf
)
192 int finalized
= CMM_ACCESS_ONCE(buf
->finalized
);
194 * Read finalized before counters.
201 int lib_ring_buffer_channel_is_finalized(const struct channel
*chan
)
203 return chan
->finalized
;
207 int lib_ring_buffer_channel_is_disabled(const struct channel
*chan
)
209 return uatomic_read(&chan
->record_disabled
);
213 unsigned long lib_ring_buffer_get_read_data_size(
214 const struct lttng_ust_lib_ring_buffer_config
*config
,
215 struct lttng_ust_lib_ring_buffer
*buf
,
216 struct lttng_ust_shm_handle
*handle
)
218 return subbuffer_get_read_data_size(config
, &buf
->backend
, handle
);
222 unsigned long lib_ring_buffer_get_records_count(
223 const struct lttng_ust_lib_ring_buffer_config
*config
,
224 struct lttng_ust_lib_ring_buffer
*buf
)
226 return v_read(config
, &buf
->records_count
);
230 unsigned long lib_ring_buffer_get_records_overrun(
231 const struct lttng_ust_lib_ring_buffer_config
*config
,
232 struct lttng_ust_lib_ring_buffer
*buf
)
234 return v_read(config
, &buf
->records_overrun
);
238 unsigned long lib_ring_buffer_get_records_lost_full(
239 const struct lttng_ust_lib_ring_buffer_config
*config
,
240 struct lttng_ust_lib_ring_buffer
*buf
)
242 return v_read(config
, &buf
->records_lost_full
);
246 unsigned long lib_ring_buffer_get_records_lost_wrap(
247 const struct lttng_ust_lib_ring_buffer_config
*config
,
248 struct lttng_ust_lib_ring_buffer
*buf
)
250 return v_read(config
, &buf
->records_lost_wrap
);
254 unsigned long lib_ring_buffer_get_records_lost_big(
255 const struct lttng_ust_lib_ring_buffer_config
*config
,
256 struct lttng_ust_lib_ring_buffer
*buf
)
258 return v_read(config
, &buf
->records_lost_big
);
262 unsigned long lib_ring_buffer_get_records_read(
263 const struct lttng_ust_lib_ring_buffer_config
*config
,
264 struct lttng_ust_lib_ring_buffer
*buf
)
266 return v_read(config
, &buf
->backend
.records_read
);
269 #endif /* _LTTNG_RING_BUFFER_FRONTEND_H */