Only specify that sdt.h provides system integration for now
[lttng-ust.git] / libringbuffer / frontend.h
CommitLineData
852c2936
MD
1#ifndef _LINUX_RING_BUFFER_FRONTEND_H
2#define _LINUX_RING_BUFFER_FRONTEND_H
3
4/*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
14641deb
MD
19#include <urcu/compiler.h>
20#include <urcu/uatomic.h>
21
a6352fd4 22#include "smp.h"
852c2936 23/* Internal helpers */
4931a13e 24#include "frontend_internal.h"
852c2936
MD
25
26/* Buffer creation/removal and setup operations */
27
28/*
29 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
30 * padding to let readers get those sub-buffers. Used for live streaming.
31 *
32 * read_timer_interval is the time interval (in us) to wake up pending readers.
33 *
34 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
35 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
36 * be set to NULL for other backends.
a3f61e7f
MD
37 *
38 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
39 * memory area for client-specific data. This memory is managed by lib
40 * ring buffer. priv_data_align is the alignment required for the
41 * private data area.
852c2936
MD
42 */
43
44extern
4cfec15c 45struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
a3f61e7f
MD
46 const char *name,
47 void **priv_data,
48 size_t priv_data_align,
49 size_t priv_data_size,
d028eddb 50 void *priv_data_init,
431d5cf0
MD
51 void *buf_addr,
52 size_t subbuf_size, size_t num_subbuf,
53 unsigned int switch_timer_interval,
193183fb 54 unsigned int read_timer_interval,
ef9ff354
MD
55 int **shm_fd, int **wait_fd,
56 uint64_t **memory_map_size);
193183fb
MD
57
58/* channel_handle_create - for consumer. */
59extern
38fae1d3 60struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
193183fb
MD
61 uint64_t memory_map_size);
62
63/* channel_handle_add_stream - for consumer. */
64extern
38fae1d3 65int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
193183fb 66 int shm_fd, int wait_fd, uint64_t memory_map_size);
852c2936
MD
67
68/*
a3f61e7f
MD
69 * channel_destroy finalizes all channel's buffers, waits for readers to
70 * release all references, and destroys the channel.
852c2936
MD
71 */
72extern
a3f61e7f 73void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
824f40b8 74 int shadow);
852c2936
MD
75
76
77/* Buffer read operations */
78
79/*
80 * Iteration on channel cpumask needs to issue a read barrier to match the write
81 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
82 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
83 * only performed at channel destruction.
84 */
85#define for_each_channel_cpu(cpu, chan) \
a6352fd4 86 for_each_possible_cpu(cpu)
852c2936 87
4cfec15c
MD
88extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
89 const struct lttng_ust_lib_ring_buffer_config *config,
1d498196 90 struct channel *chan, int cpu,
38fae1d3 91 struct lttng_ust_shm_handle *handle,
ef9ff354
MD
92 int **shm_fd, int **wait_fd,
93 uint64_t **memory_map_size);
4cfec15c 94extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 95 struct lttng_ust_shm_handle *handle,
824f40b8 96 int shadow);
4cfec15c 97extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 98 struct lttng_ust_shm_handle *handle,
824f40b8 99 int shadow);
852c2936
MD
100
101/*
102 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
103 */
4cfec15c 104extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
852c2936 105 unsigned long *consumed,
1d498196 106 unsigned long *produced,
38fae1d3 107 struct lttng_ust_shm_handle *handle);
4cfec15c 108extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
1d498196 109 unsigned long consumed_new,
38fae1d3 110 struct lttng_ust_shm_handle *handle);
852c2936 111
4cfec15c 112extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
1d498196 113 unsigned long consumed,
38fae1d3 114 struct lttng_ust_shm_handle *handle);
4cfec15c 115extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 116 struct lttng_ust_shm_handle *handle);
852c2936
MD
117
118/*
119 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
120 * to read sub-buffers sequentially.
121 */
4cfec15c 122static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 123 struct lttng_ust_shm_handle *handle)
852c2936
MD
124{
125 int ret;
126
127 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
1d498196 128 &buf->prod_snapshot, handle);
852c2936
MD
129 if (ret)
130 return ret;
1d498196 131 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
852c2936
MD
132 return ret;
133}
134
/*
 * Release the sub-buffer previously acquired by
 * lib_ring_buffer_get_next_subbuf() and move the consumer position to the
 * start of the following sub-buffer.
 */
static inline
void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				     struct lttng_ust_shm_handle *handle)
{
	/* Drop the read-side reference on the current sub-buffer first. */
	lib_ring_buffer_put_subbuf(buf, handle);
	/*
	 * Advance the consumer to the next sub-buffer boundary: align the
	 * consumed snapshot on the sub-buffer size of the channel, which is
	 * looked up through the shared-memory handle.
	 */
	lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
					shmp(handle, buf->backend.chan)), handle);
}
143
144extern void channel_reset(struct channel *chan);
4cfec15c 145extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 146 struct lttng_ust_shm_handle *handle);
852c2936
MD
147
148static inline
4cfec15c
MD
149unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
150 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
151{
152 return v_read(config, &buf->offset);
153}
154
155static inline
4cfec15c
MD
156unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
157 struct lttng_ust_lib_ring_buffer *buf)
852c2936 158{
14641deb 159 return uatomic_read(&buf->consumed);
852c2936
MD
160}
161
162/*
163 * Must call lib_ring_buffer_is_finalized before reading counters (memory
164 * ordering enforced with respect to trace teardown).
165 */
/*
 * Return nonzero when the buffer has been finalized (trace teardown).
 * Must be called before reading counters: the read barrier below orders
 * the finalized-flag read before any subsequent counter reads.
 * The config parameter is unused here but kept for API symmetry.
 */
static inline
int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf)
{
	int finalized = CMM_ACCESS_ONCE(buf->finalized);

	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}
177
178static inline
179int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
180{
181 return chan->finalized;
182}
183
184static inline
185int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
186{
14641deb 187 return uatomic_read(&chan->record_disabled);
852c2936
MD
188}
189
190static inline
191unsigned long lib_ring_buffer_get_read_data_size(
4cfec15c
MD
192 const struct lttng_ust_lib_ring_buffer_config *config,
193 struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 194 struct lttng_ust_shm_handle *handle)
852c2936 195{
1d498196 196 return subbuffer_get_read_data_size(config, &buf->backend, handle);
852c2936
MD
197}
198
199static inline
200unsigned long lib_ring_buffer_get_records_count(
4cfec15c
MD
201 const struct lttng_ust_lib_ring_buffer_config *config,
202 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
203{
204 return v_read(config, &buf->records_count);
205}
206
207static inline
208unsigned long lib_ring_buffer_get_records_overrun(
4cfec15c
MD
209 const struct lttng_ust_lib_ring_buffer_config *config,
210 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
211{
212 return v_read(config, &buf->records_overrun);
213}
214
215static inline
216unsigned long lib_ring_buffer_get_records_lost_full(
4cfec15c
MD
217 const struct lttng_ust_lib_ring_buffer_config *config,
218 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
219{
220 return v_read(config, &buf->records_lost_full);
221}
222
223static inline
224unsigned long lib_ring_buffer_get_records_lost_wrap(
4cfec15c
MD
225 const struct lttng_ust_lib_ring_buffer_config *config,
226 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
227{
228 return v_read(config, &buf->records_lost_wrap);
229}
230
231static inline
232unsigned long lib_ring_buffer_get_records_lost_big(
4cfec15c
MD
233 const struct lttng_ust_lib_ring_buffer_config *config,
234 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
235{
236 return v_read(config, &buf->records_lost_big);
237}
238
239static inline
240unsigned long lib_ring_buffer_get_records_read(
4cfec15c
MD
241 const struct lttng_ust_lib_ring_buffer_config *config,
242 struct lttng_ust_lib_ring_buffer *buf)
852c2936
MD
243{
244 return v_read(config, &buf->backend.records_read);
245}
246
247#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.037197 seconds and 4 git commands to generate.