ust: continue work
[lttng-ust.git] / libtracing / channels.c
1 /*
2 * ltt/ltt-channels.c
3 *
4 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
5 *
6 * LTTng channel management.
7 *
8 * Author:
9 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
10 */
11
12 //ust// #include <linux/module.h>
13 //ust// #include <linux/ltt-channels.h>
14 //ust// #include <linux/mutex.h>
15 //ust// #include <linux/vmalloc.h>
16
17 #include "kernelcompat.h"
18 #include "channels.h"
19 #include "usterr.h"
20
21 /*
22 * ltt_channel_mutex may be nested inside the LTT trace mutex.
23 * ltt_channel_mutex mutex may be nested inside markers mutex.
24 */
25 static DEFINE_MUTEX(ltt_channel_mutex);
26 static LIST_HEAD(ltt_channels);
27 /*
28 * Index of next channel in array. Makes sure that as long as a trace channel is
29 * allocated, no array index will be re-used when a channel is freed and then
30 * another channel is allocated. This index is cleared and the array indexeds
31 * get reassigned when the index_kref goes back to 0, which indicates that no
32 * more trace channels are allocated.
33 */
34 static unsigned int free_index;
35 static struct kref index_kref; /* Keeps track of allocated trace channels */
36
37 static struct ltt_channel_setting *lookup_channel(const char *name)
38 {
39 struct ltt_channel_setting *iter;
40
41 list_for_each_entry(iter, &ltt_channels, list)
42 if (strcmp(name, iter->name) == 0)
43 return iter;
44 return NULL;
45 }
46
47 /*
48 * Must be called when channel refcount falls to 0 _and_ also when the last
49 * trace is freed. This function is responsible for compacting the channel and
50 * event IDs when no users are active.
51 *
52 * Called with lock_markers() and channels mutex held.
53 */
/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	/*
	 * Tear down only when BOTH the per-setting refcount and the global
	 * trace-channel refcount (index_kref) have reached zero: a setting
	 * with refcount 0 must stay on the list while any trace channel is
	 * still allocated, so its array index is not re-used (see the
	 * free_index comment at the top of this file).
	 */
	if (atomic_read(&index_kref.refcount) == 0
	    && atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		/*
		 * Compact: reassign dense indexes to the remaining settings
		 * and reset their event-ID counters. Safe because no trace
		 * channels exist at this point.
		 */
		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
//ust//		markers_compact_event_ids();
	}
}
73
74 /*
75 * Perform channel index compaction when the last trace channel is freed.
76 *
77 * Called with lock_markers() and channels mutex held.
78 */
79 static void release_trace_channel(struct kref *kref)
80 {
81 struct ltt_channel_setting *iter, *n;
82
83 list_for_each_entry_safe(iter, n, &ltt_channels, list)
84 release_channel_setting(&iter->kref);
85 }
86
87 /**
88 * ltt_channels_register - Register a trace channel.
89 * @name: channel name
90 *
91 * Uses refcounting.
92 */
93 int ltt_channels_register(const char *name)
94 {
95 struct ltt_channel_setting *setting;
96 int ret = 0;
97
98 mutex_lock(&ltt_channel_mutex);
99 setting = lookup_channel(name);
100 if (setting) {
101 if (atomic_read(&setting->kref.refcount) == 0)
102 goto init_kref;
103 else {
104 kref_get(&setting->kref);
105 goto end;
106 }
107 }
108 setting = kzalloc(sizeof(*setting), GFP_KERNEL);
109 if (!setting) {
110 ret = -ENOMEM;
111 goto end;
112 }
113 list_add(&setting->list, &ltt_channels);
114 strncpy(setting->name, name, PATH_MAX-1);
115 setting->index = free_index++;
116 init_kref:
117 kref_init(&setting->kref);
118 end:
119 mutex_unlock(&ltt_channel_mutex);
120 return ret;
121 }
122 //ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
123
124 /**
125 * ltt_channels_unregister - Unregister a trace channel.
126 * @name: channel name
127 *
128 * Must be called with markers mutex held.
129 */
130 int ltt_channels_unregister(const char *name)
131 {
132 struct ltt_channel_setting *setting;
133 int ret = 0;
134
135 mutex_lock(&ltt_channel_mutex);
136 setting = lookup_channel(name);
137 if (!setting || atomic_read(&setting->kref.refcount) == 0) {
138 ret = -ENOENT;
139 goto end;
140 }
141 kref_put(&setting->kref, release_channel_setting);
142 end:
143 mutex_unlock(&ltt_channel_mutex);
144 return ret;
145 }
146 //ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
147
148 /**
149 * ltt_channels_set_default - Set channel default behavior.
150 * @name: default channel name
151 * @subbuf_size: size of the subbuffers
152 * @subbuf_cnt: number of subbuffers
153 */
154 int ltt_channels_set_default(const char *name,
155 unsigned int subbuf_size,
156 unsigned int subbuf_cnt)
157 {
158 struct ltt_channel_setting *setting;
159 int ret = 0;
160
161 mutex_lock(&ltt_channel_mutex);
162 setting = lookup_channel(name);
163 if (!setting || atomic_read(&setting->kref.refcount) == 0) {
164 ret = -ENOENT;
165 goto end;
166 }
167 setting->subbuf_size = subbuf_size;
168 setting->subbuf_cnt = subbuf_cnt;
169 end:
170 mutex_unlock(&ltt_channel_mutex);
171 return ret;
172 }
173 //ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
174
175 /**
176 * ltt_channels_get_name_from_index - get channel name from channel index
177 * @index: channel index
178 *
179 * Allows to lookup the channel name given its index. Done to keep the name
180 * information outside of each trace channel instance.
181 */
182 const char *ltt_channels_get_name_from_index(unsigned int index)
183 {
184 struct ltt_channel_setting *iter;
185
186 list_for_each_entry(iter, &ltt_channels, list)
187 if (iter->index == index && atomic_read(&iter->kref.refcount))
188 return iter->name;
189 return NULL;
190 }
191 //ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
192
193 static struct ltt_channel_setting *
194 ltt_channels_get_setting_from_name(const char *name)
195 {
196 struct ltt_channel_setting *iter;
197
198 list_for_each_entry(iter, &ltt_channels, list)
199 if (!strcmp(iter->name, name)
200 && atomic_read(&iter->kref.refcount))
201 return iter;
202 return NULL;
203 }
204
205 /**
206 * ltt_channels_get_index_from_name - get channel index from channel name
207 * @name: channel name
208 *
209 * Allows to lookup the channel index given its name. Done to keep the name
210 * information outside of each trace channel instance.
211 * Returns -1 if not found.
212 */
213 int ltt_channels_get_index_from_name(const char *name)
214 {
215 struct ltt_channel_setting *setting;
216
217 setting = ltt_channels_get_setting_from_name(name);
218 if (setting)
219 return setting->index;
220 else
221 return -1;
222 }
223 //ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
224
225 /**
226 * ltt_channels_trace_alloc - Allocate channel structures for a trace
227 * @subbuf_size: subbuffer size. 0 uses default.
228 * @subbuf_cnt: number of subbuffers per per-cpu buffers. 0 uses default.
229 * @flags: Default channel flags
230 *
231 * Use the current channel list to allocate the channels for a trace.
232 * Called with trace lock held. Does not perform the trace buffer allocation,
233 * because we must let the user overwrite specific channel sizes.
234 */
235 struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
236 int overwrite,
237 int active)
238 {
239 struct ltt_channel_struct *channel = NULL;
240 struct ltt_channel_setting *iter;
241
242 mutex_lock(&ltt_channel_mutex);
243 if (!free_index) {
244 WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
245 goto end;
246 }
247 if (!atomic_read(&index_kref.refcount))
248 kref_init(&index_kref);
249 else
250 kref_get(&index_kref);
251 *nr_channels = free_index;
252 channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
253 GFP_KERNEL);
254 if (!channel) {
255 WARN("ltt_channel_struct: channel null after alloc");
256 goto end;
257 }
258 list_for_each_entry(iter, &ltt_channels, list) {
259 if (!atomic_read(&iter->kref.refcount))
260 continue;
261 channel[iter->index].subbuf_size = iter->subbuf_size;
262 channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
263 channel[iter->index].overwrite = overwrite;
264 channel[iter->index].active = active;
265 channel[iter->index].channel_name = iter->name;
266 }
267 end:
268 mutex_unlock(&ltt_channel_mutex);
269 return channel;
270 }
271 //ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
272
273 /**
274 * ltt_channels_trace_free - Free one trace's channels
275 * @channels: channels to free
276 *
277 * Called with trace lock held. The actual channel buffers must be freed before
278 * this function is called.
279 */
/**
 * ltt_channels_trace_free - Free one trace's channels
 * @channels: channels to free
 *
 * Called with trace lock held. The actual channel buffers must be freed before
 * this function is called.
 *
 * Lock ordering matters here: markers lock is taken outside the channel
 * mutex because kref_put() may invoke release_trace_channel(), which must
 * run with both held (see its header comment).
 */
void ltt_channels_trace_free(struct ltt_channel_struct *channels)
{
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(channels);
	/* Last trace gone => compact channel indexes and event IDs. */
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
}
290
291 /**
292 * _ltt_channels_get_event_id - get next event ID for a marker
293 * @channel: channel name
294 * @name: event name
295 *
296 * Returns a unique event ID (for this channel) or < 0 on error.
297 * Must be called with channels mutex held.
298 */
/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error
 * (-ENOENT: unknown channel or unknown metadata event name;
 *  -ENOSPC: event IDs exhausted for this channel).
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	/*
	 * The metadata channel uses fixed, well-known event IDs instead of
	 * the running counter.
	 */
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else
			ret = -ENOENT;
		goto end;
	}
	/*
	 * NOTE(review): this refuses to hand out ID EVENTS_PER_CHANNEL - 1,
	 * leaving one slot unused. Possibly intentional (reserved ID) —
	 * matches the upstream check; confirm before changing.
	 */
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}
326
327 /**
328 * ltt_channels_get_event_id - get next event ID for a marker
329 * @channel: channel name
330 * @name: event name
331 *
332 * Returns a unique event ID (for this channel) or < 0 on error.
333 */
334 int ltt_channels_get_event_id(const char *channel, const char *name)
335 {
336 int ret;
337
338 mutex_lock(&ltt_channel_mutex);
339 ret = _ltt_channels_get_event_id(channel, name);
340 mutex_unlock(&ltt_channel_mutex);
341 return ret;
342 }
343
344 //ust// MODULE_LICENSE("GPL");
345 //ust// MODULE_AUTHOR("Mathieu Desnoyers");
346 //ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
This page took 0.040772 seconds and 4 git commands to generate.