#ifndef _LINUX_RING_BUFFER_FRONTEND_H
#define _LINUX_RING_BUFFER_FRONTEND_H
/*
 * linux/ringbuffer/frontend.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

/* Internal helpers */
#include "frontend_internal.h"
/* Buffer creation/removal and setup operations */

/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 *
 * priv_data (output) is set to a pointer into a "priv_data_len"-sized
 * memory area for client-specific data. This memory is managed by lib
 * ring buffer. priv_data_align is the alignment required for the
 * private data area.
 */
45 struct lttng_ust_shm_handle
*channel_create(const struct lttng_ust_lib_ring_buffer_config
*config
,
48 size_t priv_data_align
,
49 size_t priv_data_size
,
52 size_t subbuf_size
, size_t num_subbuf
,
53 unsigned int switch_timer_interval
,
54 unsigned int read_timer_interval
,
55 int **shm_fd
, int **wait_fd
,
56 uint64_t **memory_map_size
);
/* channel_handle_create - for consumer. */
60 struct lttng_ust_shm_handle
*channel_handle_create(int shm_fd
, int wait_fd
,
61 uint64_t memory_map_size
);
/* channel_handle_add_stream - for consumer. */
65 int channel_handle_add_stream(struct lttng_ust_shm_handle
*handle
,
66 int shm_fd
, int wait_fd
, uint64_t memory_map_size
);
/*
 * channel_destroy finalizes all channel's buffers, waits for readers to
 * release all references, and destroys the channel.
 */
73 void channel_destroy(struct channel
*chan
, struct lttng_ust_shm_handle
*handle
,
/* Buffer read operations */

/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
85 #define for_each_channel_cpu(cpu, chan) \
86 for_each_possible_cpu(cpu)
88 extern struct lttng_ust_lib_ring_buffer
*channel_get_ring_buffer(
89 const struct lttng_ust_lib_ring_buffer_config
*config
,
90 struct channel
*chan
, int cpu
,
91 struct lttng_ust_shm_handle
*handle
,
92 int **shm_fd
, int **wait_fd
,
93 uint64_t **memory_map_size
);
94 extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer
*buf
,
95 struct lttng_ust_shm_handle
*handle
,
97 extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer
*buf
,
98 struct lttng_ust_shm_handle
*handle
,
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
104 extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer
*buf
,
105 unsigned long *consumed
,
106 unsigned long *produced
,
107 struct lttng_ust_shm_handle
*handle
);
108 extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer
*buf
,
109 unsigned long consumed_new
,
110 struct lttng_ust_shm_handle
*handle
);
112 extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
113 unsigned long consumed
,
114 struct lttng_ust_shm_handle
*handle
);
115 extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
116 struct lttng_ust_shm_handle
*handle
);
/*
 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
 * to read sub-buffers sequentially.
 */
122 static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
123 struct lttng_ust_shm_handle
*handle
)
127 ret
= lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
128 &buf
->prod_snapshot
, handle
);
131 ret
= lib_ring_buffer_get_subbuf(buf
, buf
->cons_snapshot
, handle
);
136 void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer
*buf
,
137 struct lttng_ust_shm_handle
*handle
)
139 lib_ring_buffer_put_subbuf(buf
, handle
);
140 lib_ring_buffer_move_consumer(buf
, subbuf_align(buf
->cons_snapshot
,
141 shmp(handle
, buf
->backend
.chan
)), handle
);
144 extern void channel_reset(struct channel
*chan
);
145 extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer
*buf
,
146 struct lttng_ust_shm_handle
*handle
);
149 unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config
*config
,
150 struct lttng_ust_lib_ring_buffer
*buf
)
152 return v_read(config
, &buf
->offset
);
156 unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config
*config
,
157 struct lttng_ust_lib_ring_buffer
*buf
)
159 return uatomic_read(&buf
->consumed
);
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 */
167 int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config
*config
,
168 struct lttng_ust_lib_ring_buffer
*buf
)
170 int finalized
= CMM_ACCESS_ONCE(buf
->finalized
);
172 * Read finalized before counters.
179 int lib_ring_buffer_channel_is_finalized(const struct channel
*chan
)
181 return chan
->finalized
;
185 int lib_ring_buffer_channel_is_disabled(const struct channel
*chan
)
187 return uatomic_read(&chan
->record_disabled
);
191 unsigned long lib_ring_buffer_get_read_data_size(
192 const struct lttng_ust_lib_ring_buffer_config
*config
,
193 struct lttng_ust_lib_ring_buffer
*buf
,
194 struct lttng_ust_shm_handle
*handle
)
196 return subbuffer_get_read_data_size(config
, &buf
->backend
, handle
);
200 unsigned long lib_ring_buffer_get_records_count(
201 const struct lttng_ust_lib_ring_buffer_config
*config
,
202 struct lttng_ust_lib_ring_buffer
*buf
)
204 return v_read(config
, &buf
->records_count
);
208 unsigned long lib_ring_buffer_get_records_overrun(
209 const struct lttng_ust_lib_ring_buffer_config
*config
,
210 struct lttng_ust_lib_ring_buffer
*buf
)
212 return v_read(config
, &buf
->records_overrun
);
216 unsigned long lib_ring_buffer_get_records_lost_full(
217 const struct lttng_ust_lib_ring_buffer_config
*config
,
218 struct lttng_ust_lib_ring_buffer
*buf
)
220 return v_read(config
, &buf
->records_lost_full
);
224 unsigned long lib_ring_buffer_get_records_lost_wrap(
225 const struct lttng_ust_lib_ring_buffer_config
*config
,
226 struct lttng_ust_lib_ring_buffer
*buf
)
228 return v_read(config
, &buf
->records_lost_wrap
);
232 unsigned long lib_ring_buffer_get_records_lost_big(
233 const struct lttng_ust_lib_ring_buffer_config
*config
,
234 struct lttng_ust_lib_ring_buffer
*buf
)
236 return v_read(config
, &buf
->records_lost_big
);
240 unsigned long lib_ring_buffer_get_records_read(
241 const struct lttng_ust_lib_ring_buffer_config
*config
,
242 struct lttng_ust_lib_ring_buffer
*buf
)
244 return v_read(config
, &buf
->backend
.records_read
);
#endif /* _LINUX_RING_BUFFER_FRONTEND_H */