libust/channels.c
/*
 * ltt/ltt-channels.c
 *
 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng channel management.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//ust// #include <linux/module.h>
//ust// #include <linux/ltt-channels.h>
//ust// #include <linux/mutex.h>
//ust// #include <linux/vmalloc.h>

#include <ust/kernelcompat.h>
#include "channels.h"
#include "usterr.h"
#include <ust/marker.h>

/*
 * ltt_channel_mutex may be nested inside the LTT trace mutex.
 * ltt_channel_mutex may be nested inside the markers mutex.
 */
static DEFINE_MUTEX(ltt_channel_mutex);
static LIST_HEAD(ltt_channels);
/*
 * Index of the next channel in the array. Makes sure that as long as a trace
 * channel is allocated, no array index will be re-used when a channel is freed
 * and then another channel is allocated. This index is cleared and the array
 * indexes get reassigned when index_kref goes back to 0, which indicates that
 * no more trace channels are allocated.
 */
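/*
 * Illustration of the scheme above: if channels "a", "b" and "c" are
 * registered, they get indices 0, 1 and 2. If "b" is then unregistered while
 * a trace still holds index_kref, index 1 is not handed out again; when the
 * last trace is freed, release_channel_setting() drops "b" and compacts the
 * remaining channels back to indices 0 and 1.
 */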
static unsigned int free_index;
static struct kref index_kref;	/* Keeps track of allocated trace channels */

static struct ltt_channel_setting *lookup_channel(const char *name)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (strcmp(name, iter->name) == 0)
			return iter;
	return NULL;
}

/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	if (atomic_read(&index_kref.refcount) == 0
	    && atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
		//ust// markers_compact_event_ids();
	}
}

/*
 * Perform channel index compaction when the last trace channel is freed.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_trace_channel(struct kref *kref)
{
	struct ltt_channel_setting *iter, *n;

	list_for_each_entry_safe(iter, n, &ltt_channels, list)
		release_channel_setting(&iter->kref);
}

/**
 * ltt_channels_register - Register a trace channel.
 * @name: channel name
 *
 * Uses refcounting.
 */
int ltt_channels_register(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (setting) {
		if (atomic_read(&setting->kref.refcount) == 0)
			goto init_kref;
		else {
			kref_get(&setting->kref);
			goto end;
		}
	}
	setting = kzalloc(sizeof(*setting), GFP_KERNEL);
	if (!setting) {
		ret = -ENOMEM;
		goto end;
	}
	list_add(&setting->list, &ltt_channels);
	strncpy(setting->name, name, PATH_MAX-1);
	setting->index = free_index++;
init_kref:
	kref_init(&setting->kref);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);

/**
 * ltt_channels_unregister - Unregister a trace channel.
 * @name: channel name
 *
 * Must be called with markers mutex held.
 */
int ltt_channels_unregister(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	kref_put(&setting->kref, release_channel_setting);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);

/**
 * ltt_channels_set_default - Set channel default behavior.
 * @name: default channel name
 * @subbuf_size: size of the subbuffers
 * @subbuf_cnt: number of subbuffers
 */
int ltt_channels_set_default(const char *name,
			     unsigned int subbuf_size,
			     unsigned int subbuf_cnt)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	setting->subbuf_size = subbuf_size;
	setting->subbuf_cnt = subbuf_cnt;
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);

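/*
 * Usage sketch (illustrative; the channel name "mychan" and the buffer
 * geometry are hypothetical): a probe provider would typically register its
 * channel, optionally override the default sub-buffer layout, and drop its
 * reference when it is done with the channel.
 *
 *	int ret;
 *
 *	ret = ltt_channels_register("mychan");
 *	if (ret)
 *		return ret;
 *	ltt_channels_set_default("mychan", 4096, 8);
 *	...
 *	ltt_channels_unregister("mychan");
 */
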
/**
 * ltt_channels_get_name_from_index - get channel name from channel index
 * @index: channel index
 *
 * Allows looking up the channel name given its index. Done to keep the name
 * information outside of each trace channel instance.
 */
const char *ltt_channels_get_name_from_index(unsigned int index)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (iter->index == index && atomic_read(&iter->kref.refcount))
			return iter->name;
	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);

static struct ltt_channel_setting *
ltt_channels_get_setting_from_name(const char *name)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (!strcmp(iter->name, name)
		    && atomic_read(&iter->kref.refcount))
			return iter;
	return NULL;
}

/**
 * ltt_channels_get_index_from_name - get channel index from channel name
 * @name: channel name
 *
 * Allows looking up the channel index given its name. Done to keep the name
 * information outside of each trace channel instance.
 * Returns -1 if not found.
 */
int ltt_channels_get_index_from_name(const char *name)
{
	struct ltt_channel_setting *setting;

	setting = ltt_channels_get_setting_from_name(name);
	if (setting)
		return setting->index;
	else
		return -1;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);

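/*
 * Lookup sketch (illustrative; assumes a channel named "mychan" is currently
 * registered): the two helpers above are inverses of each other for any
 * registered channel, so the round trip below recovers the original name.
 *
 *	int idx = ltt_channels_get_index_from_name("mychan");
 *	const char *name = ltt_channels_get_name_from_index(idx);
 *
 * Here idx >= 0 and strcmp(name, "mychan") == 0.
 */
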
/**
 * ltt_channels_trace_alloc - Allocate channel structures for a trace
 * @nr_channels: (output) number of channel structures allocated
 * @overwrite: default overwrite setting applied to every channel
 * @active: default active setting applied to every channel
 *
 * Use the current channel list to allocate the channels for a trace.
 * Called with trace lock held. Does not perform the trace buffer allocation,
 * because we must let the user override specific channel sizes.
 */
struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
						    int overwrite,
						    int active)
{
	struct ltt_channel_struct *channel = NULL;
	struct ltt_channel_setting *iter;

	mutex_lock(&ltt_channel_mutex);
	if (!free_index) {
		WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
		goto end;
	}
	if (!atomic_read(&index_kref.refcount))
		kref_init(&index_kref);
	else
		kref_get(&index_kref);
	*nr_channels = free_index;
	channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
			  GFP_KERNEL);
	if (!channel) {
		WARN("ltt_channel_struct: channel null after alloc");
		goto end;
	}
	list_for_each_entry(iter, &ltt_channels, list) {
		if (!atomic_read(&iter->kref.refcount))
			continue;
		channel[iter->index].subbuf_size = iter->subbuf_size;
		channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
		channel[iter->index].overwrite = overwrite;
		channel[iter->index].active = active;
		channel[iter->index].channel_name = iter->name;
	}
end:
	mutex_unlock(&ltt_channel_mutex);
	return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);

/**
 * ltt_channels_trace_free - Free one trace's channels
 * @channels: channels to free
 *
 * Called with trace lock held. The actual channel buffers must be freed before
 * this function is called.
 */
void ltt_channels_trace_free(struct ltt_channel_struct *channels)
{
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(channels);
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);

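/*
 * Allocation sketch (illustrative): a trace setup path would typically
 * allocate the per-trace channel array from the currently registered
 * channels, and free it again after the trace buffers have been torn down.
 *
 *	unsigned int nr_channels;
 *	struct ltt_channel_struct *chans;
 *
 *	chans = ltt_channels_trace_alloc(&nr_channels, 0, 1);
 *	if (!chans)
 *		return -ENOMEM;
 *	...
 *	ltt_channels_trace_free(chans);
 */
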
/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else if (strcmp(name, "testev") == 0)
			ret = 2;
		else
			ret = -ENOENT;
		goto end;
	}
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}

/**
 * ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 */
int ltt_channels_get_event_id(const char *channel, const char *name)
{
	int ret;

	mutex_lock(&ltt_channel_mutex);
	ret = _ltt_channels_get_event_id(channel, name);
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}

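/*
 * Event ID sketch (illustrative; the "mychan"/"myevent" names are
 * hypothetical): marker registration code would typically reserve one event
 * ID per (channel, event) pair. A negative return means the channel is
 * unknown (-ENOENT) or out of event IDs (-ENOSPC).
 *
 *	int evid = ltt_channels_get_event_id("mychan", "myevent");
 *
 *	if (evid < 0)
 *		return evid;
 */
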
//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");