#ifndef _LINUX_RING_BUFFER_FRONTEND_H
#define _LINUX_RING_BUFFER_FRONTEND_H

/*
 * linux/ringbuffer/frontend.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#include "smp.h"
/* Internal helpers */
#include "frontend_internal.h"

/* Buffer creation/removal and setup operations */

/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
 * can be set to NULL for other backends.
 */

extern
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
				const char *name, void *priv,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				int *shm_fd, int *wait_fd,
				uint64_t *memory_map_size);

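/*
 * Minimal creation sketch. "client_config" and the sizes are purely
 * illustrative assumptions (4 kB sub-buffers, 8 of them, no switch timer,
 * 200 ms read timer), not values mandated by this API, and the NULL-on-failure
 * check is the usual convention for pointer-returning constructors:
 *
 *	int shm_fd, wait_fd;
 *	uint64_t memory_map_size;
 *	struct lttng_ust_shm_handle *handle;
 *
 *	handle = channel_create(&client_config, "chan0", NULL, NULL,
 *				4096, 8, 0, 200000,
 *				&shm_fd, &wait_fd, &memory_map_size);
 *	if (!handle)
 *		return -1;	// creation failed
 */
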
/* channel_handle_create - for consumer. */
extern
struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
				uint64_t memory_map_size);

/* channel_handle_add_stream - for consumer. */
extern
int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
				int shm_fd, int wait_fd, uint64_t memory_map_size);
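
/*
 * Consumer-side setup sketch: create the handle from the channel's shared
 * memory and wakeup file descriptors, then add each per-stream fd pair. The
 * fd and size variables are assumed to arrive out of band from the traced
 * application; error handling is reduced to early returns:
 *
 *	struct lttng_ust_shm_handle *handle;
 *
 *	handle = channel_handle_create(chan_shm_fd, chan_wait_fd,
 *				chan_memory_map_size);
 *	if (!handle)
 *		return -1;
 *	if (channel_handle_add_stream(handle, stream_shm_fd, stream_wait_fd,
 *				stream_memory_map_size))
 *		return -1;
 */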

/*
 * channel_destroy returns the private data pointer. It finalizes all of the
 * channel's buffers, waits for readers to release all references, and
 * destroys the channel.
 */
extern
void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
		int shadow);


/* Buffer read operations */

/*
 * Iteration on the channel cpumask needs to issue a read barrier to match the
 * write barrier in cpu hotplug. It orders the cpumask read before the read of
 * per-cpu buffer data. The per-cpu buffer is never removed by cpu hotplug;
 * teardown is only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)		\
	for_each_possible_cpu(cpu)

extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct channel *chan, int cpu,
				struct lttng_ust_shm_handle *handle,
				int *shm_fd, int *wait_fd,
				uint64_t *memory_map_size);
extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle,
				int shadow);
extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle,
				int shadow);

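/*
 * Reader-side sketch: walk the channel's CPUs and take a read reference on
 * each per-cpu buffer. "config", "chan" and "handle" are assumed to come from
 * the surrounding consumer context, the shm_fd/wait_fd/memory_map_size
 * outputs are not used further here, shadow is shown as 0 (assumed non-shadow
 * case), and a non-zero open_read return is treated as "cannot read now":
 *
 *	struct lttng_ust_lib_ring_buffer *buf;
 *	int cpu, shm_fd, wait_fd;
 *	uint64_t memory_map_size;
 *
 *	for_each_channel_cpu(cpu, chan) {
 *		buf = channel_get_ring_buffer(config, chan, cpu, handle,
 *					&shm_fd, &wait_fd, &memory_map_size);
 *		if (lib_ring_buffer_open_read(buf, handle, 0))
 *			continue;	// skip buffers we cannot open for read
 *		// ... consume data from this per-cpu buffer ...
 *		lib_ring_buffer_release_read(buf, handle, 0);
 *	}
 */
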
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long *consumed,
				unsigned long *produced,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long consumed_new,
				struct lttng_ust_shm_handle *handle);

extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				unsigned long consumed,
				struct lttng_ust_shm_handle *handle);
extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);

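/*
 * Low-level read sequence sketch, expanding the comment above: snapshot the
 * consumed/produced positions, then get, read and put each available
 * sub-buffer, moving the consumer forward as we go. "chan" and "handle" are
 * assumed from the caller, the payload read is elided, and the termination
 * condition is a simplifying assumption of this sketch:
 *
 *	unsigned long consumed, produced;
 *
 *	if (lib_ring_buffer_snapshot(buf, &consumed, &produced, handle))
 *		return;		// nothing readable right now
 *	while (consumed < produced) {
 *		if (lib_ring_buffer_get_subbuf(buf, consumed, handle))
 *			break;	// sub-buffer not yet available to the reader
 *		// ... read the sub-buffer payload through the backend ...
 *		lib_ring_buffer_put_subbuf(buf, handle);
 *		consumed = subbuf_align(consumed, chan);
 *		lib_ring_buffer_move_consumer(buf, consumed, handle);
 *	}
 */
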
/*
 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
 * to read sub-buffers sequentially.
 */
static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle)
{
	int ret;

	/* Snapshot the current consumed/produced positions. */
	ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
				&buf->prod_snapshot, handle);
	if (ret)
		return ret;
	/* Grab the sub-buffer at the snapshotted consumed position. */
	ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
	return ret;
}

static inline
void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_put_subbuf(buf, handle);
	/* Advance the consumer to the start of the next sub-buffer. */
	lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
				shmp(handle, buf->backend.chan)), handle);
}

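/*
 * Sequential reader sketch built on the helpers above, assuming the usual
 * zero-on-success convention of lib_ring_buffer_get_next_subbuf. The payload
 * access itself would go through the backend read API and is elided here:
 *
 *	while (!lib_ring_buffer_get_next_subbuf(buf, handle)) {
 *		// ... read the sub-buffer at buf->cons_snapshot ...
 *		lib_ring_buffer_put_next_subbuf(buf, handle);
 *	}
 */
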
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle);

static inline
unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->offset);
}

static inline
unsigned long lib_ring_buffer_get_consumed(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return uatomic_read(&buf->consumed);
}

/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 */
static inline
int lib_ring_buffer_is_finalized(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	int finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}

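/*
 * Ordering sketch for the rule above: sample the finalized flag first, then
 * read the statistics counters (accessors declared further below). Reporting
 * only once the buffer is finalized is just one possible use, shown here for
 * illustration:
 *
 *	if (lib_ring_buffer_is_finalized(config, buf)) {
 *		unsigned long lost;
 *
 *		lost = lib_ring_buffer_get_records_lost_full(config, buf)
 *			+ lib_ring_buffer_get_records_lost_wrap(config, buf)
 *			+ lib_ring_buffer_get_records_lost_big(config, buf);
 *		// ... report final statistics for this buffer ...
 *	}
 */
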
static inline
int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
{
	return chan->finalized;
}

static inline
int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
{
	return uatomic_read(&chan->record_disabled);
}

static inline
unsigned long lib_ring_buffer_get_read_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_read_data_size(config, &buf->backend, handle);
}

static inline
unsigned long lib_ring_buffer_get_records_count(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_count);
}

static inline
unsigned long lib_ring_buffer_get_records_overrun(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_overrun);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_full(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_full);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_wrap);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_big(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_big);
}

static inline
unsigned long lib_ring_buffer_get_records_read(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer *buf)
{
	return v_read(config, &buf->backend.records_read);
}

#endif /* _LINUX_RING_BUFFER_FRONTEND_H */