#ifndef _LINUX_RING_BUFFER_FRONTEND_H
#define _LINUX_RING_BUFFER_FRONTEND_H

/*
 * linux/ringbuffer/frontend.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#include "smp.h"
/* Internal helpers */
#include "frontend_internal.h"

/* Buffer creation/removal and setup operations */

/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
 * can be set to NULL for other backends.
 */

extern
struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
				const char *name, void *priv,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval);

/*
 * channel_destroy returns the private data pointer. It finalizes all the
 * channel's buffers, waits for readers to release all references, and
 * destroys the channel.
 */
extern
void *channel_destroy(struct shm_handle *handle);

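/*
 * Example (illustrative sketch, not part of this API): a typical channel
 * lifetime. "client_config" stands for a client-defined
 * struct lib_ring_buffer_config; the channel name, sizes and timer values
 * are arbitrary. buf_addr is NULL since no static mapping is used here.
 */
#if 0
	struct shm_handle *handle;
	void *priv;

	handle = channel_create(&client_config, "chan0", NULL,
				NULL,		/* buf_addr: no static mapping */
				4096, 8,	/* 8 sub-buffers of 4 kiB each */
				0, 0);		/* switch/read timers disabled */
	if (!handle)
		return;
	/* ... producers write records, readers consume ... */
	priv = channel_destroy(handle);	/* returns the priv pointer */
#endif
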
/* Buffer read operations */

/*
 * Iteration on channel cpumask needs to issue a read barrier to match the
 * write barrier in cpu hotplug. It orders the cpumask read before read of
 * per-cpu buffer data. The per-cpu buffer is never removed by cpu hotplug;
 * teardown is only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)			\
	for_each_possible_cpu(cpu)

extern struct lib_ring_buffer *channel_get_ring_buffer(
				const struct lib_ring_buffer_config *config,
				struct channel *chan, int cpu);
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);

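/*
 * Example (illustrative sketch, not part of this API): attaching a reader
 * to every per-cpu buffer of a channel. "client_config" stands for a
 * client-defined struct lib_ring_buffer_config; open_read is assumed to
 * return 0 on success.
 */
#if 0
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!lib_ring_buffer_open_read(buf)) {
			/* ... read from buf ... */
			lib_ring_buffer_release_read(buf);
		}
	}
#endif
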
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced);
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
					  unsigned long consumed_new);

extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
				      unsigned long consumed);
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);

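/*
 * Example (illustrative sketch, not part of this API): the explicit read
 * sequence named above. One snapshot can serve several get_subbuf/put_subbuf
 * iterations before the consumer position is moved. "chan" is assumed to be
 * the struct channel * backing "buf".
 */
#if 0
	unsigned long consumed, produced;

	if (lib_ring_buffer_snapshot(buf, &consumed, &produced))
		return;			/* nothing to read */
	while (consumed < produced) {
		if (lib_ring_buffer_get_subbuf(buf, consumed))
			break;		/* sub-buffer still held by the writer */
		/* ... copy records out of the sub-buffer ... */
		lib_ring_buffer_put_subbuf(buf);
		consumed = subbuf_align(consumed, chan);
	}
	lib_ring_buffer_move_consumer(buf, consumed);
#endif
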
/*
 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
 * to read sub-buffers sequentially.
 */
static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
{
	int ret;

	ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
				       &buf->prod_snapshot);
	if (ret)
		return ret;
	ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
	return ret;
}

static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_put_subbuf(buf);
	lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
						shmp(buf->backend.chan)));
}

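/*
 * Example (illustrative sketch, not part of this API): draining a buffer
 * with the sequential helpers above; each iteration consumes exactly one
 * sub-buffer, and the consumer position is advanced by the put side.
 */
#if 0
	while (!lib_ring_buffer_get_next_subbuf(buf)) {
		/* ... read one sub-buffer ... */
		lib_ring_buffer_put_next_subbuf(buf);
	}
#endif
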
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);

static inline
unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
					 struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->offset);
}

static inline
unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
					   struct lib_ring_buffer *buf)
{
	return uatomic_read(&buf->consumed);
}

/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	return finalized;
}

static inline
int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
{
	return chan->finalized;
}

static inline
int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
{
	return uatomic_read(&chan->record_disabled);
}

static inline
unsigned long lib_ring_buffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return subbuffer_get_read_data_size(config, &buf->backend);
}

static inline
unsigned long lib_ring_buffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_count);
}

static inline
unsigned long lib_ring_buffer_get_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_overrun);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_full(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_full);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_wrap);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_big(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_big);
}

static inline
unsigned long lib_ring_buffer_get_records_read(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->backend.records_read);
}

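/*
 * Example (illustrative sketch, not part of this API): sampling counters
 * near trace teardown. Per the comment above lib_ring_buffer_is_finalized,
 * the finalized flag must be read before the counters; "client_config" is
 * again an assumed client-defined configuration.
 */
#if 0
	if (lib_ring_buffer_is_finalized(&client_config, buf)) {
		unsigned long count, lost;

		count = lib_ring_buffer_get_records_count(&client_config, buf);
		lost = lib_ring_buffer_get_records_lost_full(&client_config, buf);
		/* ... report final statistics ... */
	}
#endif
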
#endif /* _LINUX_RING_BUFFER_FRONTEND_H */