/*
 * libust/channels.c (lttng-ust.git)
 * Commit context: remove mutex_lock, mutex_unlock macros.
 */
1 /*
2 * ltt/ltt-channels.c
3 *
4 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
5 *
6 * LTTng channel management.
7 *
8 * Author:
9 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26 #include <stdlib.h>
27 #include <ust/marker.h>
28 #include "channels.h"
29 #include "usterr.h"
30
31 /*
32 * ltt_channel_mutex may be nested inside the LTT trace mutex.
33 * ltt_channel_mutex mutex may be nested inside markers mutex.
34 */
35 static DEFINE_MUTEX(ltt_channel_mutex);
36 static LIST_HEAD(ltt_channels);
37 /*
38 * Index of next channel in array. Makes sure that as long as a trace channel is
39 * allocated, no array index will be re-used when a channel is freed and then
 * another channel is allocated. This index is cleared and the array indexes
41 * get reassigned when the index_kref goes back to 0, which indicates that no
42 * more trace channels are allocated.
43 */
44 static unsigned int free_index;
45 static struct kref index_kref; /* Keeps track of allocated trace channels */
46
47 int ust_channels_overwrite_by_default = 0;
48 int ust_channels_request_collection_by_default = 1;
49
50 static struct ltt_channel_setting *lookup_channel(const char *name)
51 {
52 struct ltt_channel_setting *iter;
53
54 list_for_each_entry(iter, &ltt_channels, list)
55 if (strcmp(name, iter->name) == 0)
56 return iter;
57 return NULL;
58 }
59
60 /*
61 * Must be called when channel refcount falls to 0 _and_ also when the last
62 * trace is freed. This function is responsible for compacting the channel and
63 * event IDs when no users are active.
64 *
65 * Called with lock_markers() and channels mutex held.
66 */
67 static void release_channel_setting(struct kref *kref)
68 {
69 struct ltt_channel_setting *setting = container_of(kref,
70 struct ltt_channel_setting, kref);
71 struct ltt_channel_setting *iter;
72
73 if (uatomic_read(&index_kref.refcount) == 0
74 && uatomic_read(&setting->kref.refcount) == 0) {
75 list_del(&setting->list);
76 free(setting);
77
78 free_index = 0;
79 list_for_each_entry(iter, &ltt_channels, list) {
80 iter->index = free_index++;
81 iter->free_event_id = 0;
82 }
83 /* FIXME: why not run this? */
84 //ust// markers_compact_event_ids();
85 }
86 }
87
88 /*
89 * Perform channel index compaction when the last trace channel is freed.
90 *
91 * Called with lock_markers() and channels mutex held.
92 */
93 static void release_trace_channel(struct kref *kref)
94 {
95 struct ltt_channel_setting *iter, *n;
96
97 list_for_each_entry_safe(iter, n, &ltt_channels, list)
98 release_channel_setting(&iter->kref);
99 }
100
101 /**
102 * ltt_channels_register - Register a trace channel.
103 * @name: channel name
104 *
105 * Uses refcounting.
106 */
107 int ltt_channels_register(const char *name)
108 {
109 struct ltt_channel_setting *setting;
110 int ret = 0;
111
112 pthread_mutex_lock(&ltt_channel_mutex);
113 setting = lookup_channel(name);
114 if (setting) {
115 if (uatomic_read(&setting->kref.refcount) == 0)
116 goto init_kref;
117 else {
118 kref_get(&setting->kref);
119 goto end;
120 }
121 }
122 setting = zmalloc(sizeof(*setting));
123 if (!setting) {
124 ret = -ENOMEM;
125 goto end;
126 }
127 list_add(&setting->list, &ltt_channels);
128 strncpy(setting->name, name, PATH_MAX-1);
129 setting->index = free_index++;
130 init_kref:
131 kref_init(&setting->kref);
132 end:
133 pthread_mutex_unlock(&ltt_channel_mutex);
134 return ret;
135 }
136 //ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
137
138 /**
139 * ltt_channels_unregister - Unregister a trace channel.
140 * @name: channel name
141 *
142 * Must be called with markers mutex held.
143 */
144 int ltt_channels_unregister(const char *name)
145 {
146 struct ltt_channel_setting *setting;
147 int ret = 0;
148
149 pthread_mutex_lock(&ltt_channel_mutex);
150 setting = lookup_channel(name);
151 if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
152 ret = -ENOENT;
153 goto end;
154 }
155 kref_put(&setting->kref, release_channel_setting);
156 end:
157 pthread_mutex_unlock(&ltt_channel_mutex);
158 return ret;
159 }
160 //ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
161
162 /**
163 * ltt_channels_set_default - Set channel default behavior.
164 * @name: default channel name
165 * @subbuf_size: size of the subbuffers
166 * @subbuf_cnt: number of subbuffers
167 */
168 int ltt_channels_set_default(const char *name,
169 unsigned int subbuf_size,
170 unsigned int subbuf_cnt)
171 {
172 struct ltt_channel_setting *setting;
173 int ret = 0;
174
175 pthread_mutex_lock(&ltt_channel_mutex);
176 setting = lookup_channel(name);
177 if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
178 ret = -ENOENT;
179 goto end;
180 }
181 setting->subbuf_size = subbuf_size;
182 setting->subbuf_cnt = subbuf_cnt;
183 end:
184 pthread_mutex_unlock(&ltt_channel_mutex);
185 return ret;
186 }
187 //ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
188
189 /**
190 * ltt_channels_get_name_from_index - get channel name from channel index
191 * @index: channel index
192 *
193 * Allows to lookup the channel name given its index. Done to keep the name
194 * information outside of each trace channel instance.
195 */
196 const char *ltt_channels_get_name_from_index(unsigned int index)
197 {
198 struct ltt_channel_setting *iter;
199
200 list_for_each_entry(iter, &ltt_channels, list)
201 if (iter->index == index && uatomic_read(&iter->kref.refcount))
202 return iter->name;
203 return NULL;
204 }
205 //ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
206
207 static struct ltt_channel_setting *
208 ltt_channels_get_setting_from_name(const char *name)
209 {
210 struct ltt_channel_setting *iter;
211
212 list_for_each_entry(iter, &ltt_channels, list)
213 if (!strcmp(iter->name, name)
214 && uatomic_read(&iter->kref.refcount))
215 return iter;
216 return NULL;
217 }
218
219 /**
220 * ltt_channels_get_index_from_name - get channel index from channel name
221 * @name: channel name
222 *
223 * Allows to lookup the channel index given its name. Done to keep the name
224 * information outside of each trace channel instance.
225 * Returns -1 if not found.
226 */
227 int ltt_channels_get_index_from_name(const char *name)
228 {
229 struct ltt_channel_setting *setting;
230
231 setting = ltt_channels_get_setting_from_name(name);
232 if (setting)
233 return setting->index;
234 else
235 return -1;
236 }
237 //ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
238
239 /**
240 * ltt_channels_trace_alloc - Allocate channel structures for a trace
241 * @subbuf_size: subbuffer size. 0 uses default.
242 * @subbuf_cnt: number of subbuffers per per-cpu buffers. 0 uses default.
243 * @flags: Default channel flags
244 *
245 * Use the current channel list to allocate the channels for a trace.
246 * Called with trace lock held. Does not perform the trace buffer allocation,
247 * because we must let the user overwrite specific channel sizes.
248 */
249 struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
250 int overwrite,
251 int request_collection,
252 int active)
253 {
254 struct ust_channel *channel = NULL;
255 struct ltt_channel_setting *iter;
256
257 pthread_mutex_lock(&ltt_channel_mutex);
258 if (!free_index) {
259 WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
260 goto end;
261 }
262 if (!uatomic_read(&index_kref.refcount))
263 kref_init(&index_kref);
264 else
265 kref_get(&index_kref);
266 *nr_channels = free_index;
267 channel = zmalloc(sizeof(struct ust_channel) * free_index);
268 if (!channel) {
269 WARN("ltt_channel_struct: channel null after alloc");
270 goto end;
271 }
272 list_for_each_entry(iter, &ltt_channels, list) {
273 if (!uatomic_read(&iter->kref.refcount))
274 continue;
275 channel[iter->index].subbuf_size = iter->subbuf_size;
276 channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
277 channel[iter->index].overwrite = overwrite;
278 channel[iter->index].request_collection = request_collection;
279 channel[iter->index].active = active;
280 channel[iter->index].channel_name = iter->name;
281 }
282 end:
283 pthread_mutex_unlock(&ltt_channel_mutex);
284 return channel;
285 }
286 //ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
287
288 /**
289 * ltt_channels_trace_free - Free one trace's channels
290 * @channels: channels to free
291 *
292 * Called with trace lock held. The actual channel buffers must be freed before
293 * this function is called.
294 */
void ltt_channels_trace_free(struct ust_channel *channels)
{
	/*
	 * Lock order matters: markers lock outside ltt_channel_mutex,
	 * because release_trace_channel() (run by kref_put when the last
	 * trace goes away) must execute with both held — see the comment
	 * above release_channel_setting().
	 */
	lock_markers();
	pthread_mutex_lock(&ltt_channel_mutex);
	free(channels);
	kref_put(&index_kref, release_trace_channel);
	pthread_mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
}
304 //ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
305
306 /**
307 * _ltt_channels_get_event_id - get next event ID for a marker
308 * @channel: channel name
309 * @name: event name
310 *
311 * Returns a unique event ID (for this channel) or < 0 on error.
312 * Must be called with channels mutex held.
313 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	/* Only live (refcounted) channels can hand out event IDs. */
	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	/*
	 * The metadata channel uses fixed, well-known event IDs instead of
	 * sequential allocation; consumers presumably rely on these exact
	 * values — do not renumber.
	 */
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else if (strcmp(name, "testev") == 0)
			ret = 2;
		else
			ret = -ENOENT;
		goto end;
	}
	/*
	 * NOTE(review): this check means ID EVENTS_PER_CHANNEL-1 is never
	 * handed out (last allocated ID is EVENTS_PER_CHANNEL-2). Looks like
	 * an off-by-one unless that last ID is reserved — confirm against
	 * the trace format before changing to ">= EVENTS_PER_CHANNEL".
	 */
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	/* Hand out the next sequential ID for this channel. */
	ret = setting->free_event_id++;
end:
	return ret;
}
343
344 /**
345 * ltt_channels_get_event_id - get next event ID for a marker
346 * @channel: channel name
347 * @name: event name
348 *
349 * Returns a unique event ID (for this channel) or < 0 on error.
350 */
351 int ltt_channels_get_event_id(const char *channel, const char *name)
352 {
353 int ret;
354
355 pthread_mutex_lock(&ltt_channel_mutex);
356 ret = _ltt_channels_get_event_id(channel, name);
357 pthread_mutex_unlock(&ltt_channel_mutex);
358 return ret;
359 }
360
361 //ust// MODULE_LICENSE("GPL");
362 //ust// MODULE_AUTHOR("Mathieu Desnoyers");
363 //ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");