#ifndef _LTTNG_RING_BUFFER_FRONTEND_H
#define _LTTNG_RING_BUFFER_FRONTEND_H

/*
 * libringbuffer/frontend.h
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */
#include <stddef.h>
#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/uatomic.h>

/* Internal helpers */
#include "frontend_internal.h"
/* Buffer creation/removal and setup operations */

/*
 * channel_create - Create a channel and its per-cpu/global buffers.
 *
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv_data (output) is set to a pointer into a "priv_data_size"-sized
 * memory area for client-specific data. This memory is managed by lib
 * ring buffer. priv_data_align is the alignment required for the
 * private data area.
 *
 * NOTE(review): the name/priv_data/priv_data_init/buf_addr parameters were
 * reconstructed from the documented contract above — confirm against callers.
 */
extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
				const char *name,
				void **priv_data,
				size_t priv_data_align,
				size_t priv_data_size,
				void *priv_data_init,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				int **shm_fd, int **wait_fd,
				uint64_t **memory_map_size);
/* channel_handle_create - for consumer. */
extern
struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
				uint64_t memory_map_size);
/* channel_handle_add_stream - for consumer. */
extern
int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
				int shm_fd, int wait_fd,
				uint64_t memory_map_size);
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel.
 */
/* NOTE(review): trailing "shadow" parameter restored from the lttng-ust 2.0
 * consumer-mapping API — confirm against ring_buffer_frontend.c. */
extern
void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int shadow);
/* Buffer read operations */

/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)					\
	for_each_possible_cpu(cpu)
/* Get the ring buffer for a given cpu of a channel (consumer side). */
extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct lttng_ust_shm_handle *handle,
				int **shm_fd, int **wait_fd,
				uint64_t **memory_map_size);
/* NOTE(review): trailing "shadow" parameter restored (declaration was cut at
 * a trailing comma) — confirm against ring_buffer_frontend.c. */
extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle,
				int shadow);
/* NOTE(review): trailing "shadow" parameter restored (declaration was cut at
 * a trailing comma) — confirm against ring_buffer_frontend.c. */
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle,
				int shadow);
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long *consumed,
				unsigned long *produced,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long consumed_new,
				struct lttng_ust_shm_handle *handle);
extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long consumed,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
132 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
133 * to read sub-buffers sequentially.
135 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
136 struct lttng_ust_shm_handle
*handle
)
140 ret
= lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
141 &buf
->prod_snapshot
, handle
);
144 ret
= lib_ring_buffer_get_subbuf(buf
, buf
->cons_snapshot
, handle
);
149 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
150 struct lttng_ust_shm_handle
*handle
)
152 lib_ring_buffer_put_subbuf(buf
, handle
);
153 lib_ring_buffer_move_consumer(buf
, subbuf_align(buf
->cons_snapshot
,
154 shmp(handle
, buf
->backend
.chan
)), handle
);
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);
162 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config
*config
,
163 struct lttng_ust_lib_ring_buffer
*buf
)
165 return v_read(config
, &buf
->offset
);
169 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config
*config
,
170 struct lttng_ust_lib_ring_buffer
*buf
)
172 return uatomic_read(&buf
->consumed
);
176 * Must call lib_ring_buffer_is_finalized before reading counters (memory
177 * ordering enforced with respect to trace teardown).
180 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config
*config
,
181 struct lttng_ust_lib_ring_buffer
*buf
)
183 int finalized
= CMM_ACCESS_ONCE(buf
->finalized
);
185 * Read finalized before counters.
192 int lib_ring_buffer_channel_is_finalized(const struct channel
*chan
)
194 return chan
->finalized
;
198 int lib_ring_buffer_channel_is_disabled(const struct channel
*chan
)
200 return uatomic_read(&chan
->record_disabled
);
204 unsigned long lib_ring_buffer_get_read_data_size(
205 const struct lttng_ust_lib_ring_buffer_config
*config
,
206 struct lttng_ust_lib_ring_buffer
*buf
,
207 struct lttng_ust_shm_handle
*handle
)
209 return subbuffer_get_read_data_size(config
, &buf
->backend
, handle
);
213 unsigned long lib_ring_buffer_get_records_count(
214 const struct lttng_ust_lib_ring_buffer_config
*config
,
215 struct lttng_ust_lib_ring_buffer
*buf
)
217 return v_read(config
, &buf
->records_count
);
221 unsigned long lib_ring_buffer_get_records_overrun(
222 const struct lttng_ust_lib_ring_buffer_config
*config
,
223 struct lttng_ust_lib_ring_buffer
*buf
)
225 return v_read(config
, &buf
->records_overrun
);
229 unsigned long lib_ring_buffer_get_records_lost_full(
230 const struct lttng_ust_lib_ring_buffer_config
*config
,
231 struct lttng_ust_lib_ring_buffer
*buf
)
233 return v_read(config
, &buf
->records_lost_full
);
237 unsigned long lib_ring_buffer_get_records_lost_wrap(
238 const struct lttng_ust_lib_ring_buffer_config
*config
,
239 struct lttng_ust_lib_ring_buffer
*buf
)
241 return v_read(config
, &buf
->records_lost_wrap
);
245 unsigned long lib_ring_buffer_get_records_lost_big(
246 const struct lttng_ust_lib_ring_buffer_config
*config
,
247 struct lttng_ust_lib_ring_buffer
*buf
)
249 return v_read(config
, &buf
->records_lost_big
);
253 unsigned long lib_ring_buffer_get_records_read(
254 const struct lttng_ust_lib_ring_buffer_config
*config
,
255 struct lttng_ust_lib_ring_buffer
*buf
)
257 return v_read(config
, &buf
->backend
.records_read
);
260 #endif /* _LTTNG_RING_BUFFER_FRONTEND_H */