lttng-ust.git / libust / tracepoint.c
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 * Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Ported to userspace by Pierre-Marc Fournier.
 */

#include <errno.h>
#include <ust/tracepoint.h>
#include <ust/core.h>
#include <ust/kcompat/kcompat.h>
#include "usterr.h"

#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <urcu/hlist.h>

//extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden")));
//extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden")));

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/* libraries that contain tracepoints (struct tracepoint_lib) */
static CDS_LIST_HEAD(libs);

/*
 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
 * builtin and module tracepoints and the hash table.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct cds_hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 * Tracepoint entries modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct cds_hlist_node hlist;
	struct probe *probes;
	int refcount; /* Number of times armed. 0 if disarmed. */
	char name[0];
};

struct tp_probes {
	union {
//ust//		struct rcu_head rcu;
		struct cds_list_head list;
	} u;
	struct probe probes[0];
};

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = zmalloc(count * sizeof(struct probe)
			+ sizeof(struct tp_probes));
	return p == NULL ? NULL : p->probes;
}

//ust// static void rcu_free_old_probes(struct rcu_head *head)
//ust// {
//ust//	kfree(container_of(head, struct tp_probes, u.rcu));
//ust// }

static inline void release_probes(void *old)
{
	if (old) {
		struct tp_probes *tp_probes = _ust_container_of(old,
			struct tp_probes, probes[0]);
//ust//		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
		synchronize_rcu();
		free(tp_probes);
	}
}
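
/*
 * Added note (not in the original source): the replace-then-release pattern
 * used by this file. A writer never modifies a probes array in place; it
 * builds a new one under tracepoints_mutex, publishes it with
 * rcu_assign_pointer() (see set_tracepoint() below), and only then hands the
 * old array to release_probes(), which waits out a grace period before
 * freeing it. Readers are assumed to dereference tracepoint->probes inside a
 * urcu-bp read-side critical section, so the ordering is roughly:
 *
 *	new = allocate_probes(nr_probes + 2);	copy old entries, append new
 *	rcu_assign_pointer(tp->probes, new);	publish to tracing call sites
 *	synchronize_rcu();			wait for pre-existing readers
 *	free(old);				now safe
 */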

static void debug_print_probes(struct tracepoint_entry *entry)
{
	int i;

	if (!tracepoint_debug || !entry->probes)
		return;

	for (i = 0; entry->probes[i].func; i++)
		DBG("Probe %d : %p", i, entry->probes[i].func);
}

static void *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
			   void *probe, void *data)
{
	int nr_probes = 0;
	struct probe *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->probes;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe &&
			    old[nr_probes].data == data)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(struct probe));
	new[nr_probes].func = probe;
	new[nr_probes].data = data;
	new[nr_probes + 1].func = NULL;
	entry->refcount = nr_probes + 1;
	entry->probes = new;
	debug_print_probes(entry);
	return old;
}
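
/*
 * Illustration (added comment, not in the original): growing an entry from
 * two probes to three. The array is kept NULL-terminated so the tracing call
 * site can walk it without a separate count, and the old array is returned
 * to the caller for deferred freeing via release_probes().
 *
 *	before:	{ {fa, da}, {fb, db}, {NULL} }			refcount == 2
 *	after:	{ {fa, da}, {fb, db}, {fc, dc}, {NULL} }	refcount == 3
 */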

static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe,
			      void *data)
{
	int nr_probes = 0, nr_del = 0, i;
	struct probe *old, *new;

	old = entry->probes;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(entry);
	/* (N -> M), (N > 1, M >= 0) probes */
	for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
		if (!probe ||
		    (old[nr_probes].func == probe &&
		     old[nr_probes].data == data))
			nr_del++;
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->probes = NULL;
		entry->refcount = 0;
		debug_print_probes(entry);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if (probe &&
			    (old[i].func != probe || old[i].data != data))
				new[j++] = old[i];
		new[nr_probes - nr_del].func = NULL;
		entry->refcount = nr_probes - nr_del;
		entry->probes = new;
	}
	debug_print_probes(entry);
	return old;
}

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	struct tracepoint_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	cds_hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	struct tracepoint_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len - 1, 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	cds_hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			DBG("tracepoint %s busy", name);
			return ERR_PTR(-EEXIST);	/* Already there */
		}
	}
	/*
	 * Using zmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = zmalloc(sizeof(struct tracepoint_entry) + name_len);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	e->probes = NULL;
	e->refcount = 0;
	cds_hlist_add_head(&e->hlist, head);
	return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	cds_hlist_del(&e->hlist);
	free(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
			   struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	/*
	 * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from ust/tracepoint.h. A
	 * matching cmm_smp_read_barrier_depends() is used.
	 */
	rcu_assign_pointer(elem->probes, (*entry)->probes);
	elem->state__imv = active;
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting an RCU grace period after setting elem->probes to NULL
 * ensures that the original callbacks are not used anymore. This is ensured
 * by the RCU read-side critical section surrounding the probe call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	elem->state__imv = 0;
	rcu_assign_pointer(elem->probes, NULL);
}

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 */
void tracepoint_update_probe_range(struct tracepoint * const *begin,
				   struct tracepoint * const *end)
{
	struct tracepoint * const *iter;
	struct tracepoint_entry *mark_entry;

	pthread_mutex_lock(&tracepoints_mutex);
	for (iter = begin; iter < end; iter++) {
		if (!(*iter)->name) {
			disable_tracepoint(*iter);
			continue;
		}
		mark_entry = get_tracepoint((*iter)->name);
		if (mark_entry) {
			set_tracepoint(&mark_entry, *iter,
					!!mark_entry->refcount);
		} else {
			disable_tracepoint(*iter);
		}
	}
	pthread_mutex_unlock(&tracepoints_mutex);
}
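
/*
 * Added note (not in the original): a "range" is the contiguous array of
 * struct tracepoint pointers that the instrumentation macros emit into a
 * dedicated section of each instrumented executable or library. Every
 * registered library contributes one [start, start + count) range, and the
 * function above walks it, connecting each element whose name has an armed
 * entry in the hash table and disabling the others.
 */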

static void lib_update_tracepoints(void)
{
	struct tracepoint_lib *lib;

//ust//	pthread_mutex_lock(&module_mutex);
	cds_list_for_each_entry(lib, &libs, list)
		tracepoint_update_probe_range(lib->tracepoints_start,
			lib->tracepoints_start + lib->tracepoints_count);
//ust//	pthread_mutex_unlock(&module_mutex);
}

/*
 * Update probes, removing the faulty probes.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
//ust//	tracepoint_update_probe_range(__start___tracepoints,
//ust//		__stop___tracepoints);
	/* tracepoints in libraries. */
	lib_update_tracepoints();
	/* Update immediate values */
	core_imv_update();
//ust//	module_imv_update();
}

static struct probe *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct probe *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return (struct probe *)entry;
	}
	old = tracepoint_entry_add_probe(entry, probe, data);
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	pthread_mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register);
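
/*
 * Usage sketch (illustrative, not part of the original file). The probe
 * prototype below is hypothetical: the real prototype is dictated by the
 * tracepoint declaration in the instrumented code, with the registered data
 * pointer passed to the probe in addition to the tracepoint arguments.
 *
 *	static void my_probe(void *data, int value)
 *	{
 *		...
 *	}
 *
 *	int ret = tracepoint_probe_register("my_event", my_probe, NULL);
 *	if (ret)
 *		... handle error (-EEXIST, -ENOMEM, ...) ...
 *
 *	tracepoint_probe_unregister("my_event", my_probe, NULL);
 */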

static void *tracepoint_remove_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	void *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe, data);
	if (IS_ERR(old))
		return old;
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe data pointer
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	pthread_mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static CDS_LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = _ust_container_of(old,
			struct tp_probes, probes[0]);
		cds_list_add(&tp_probes->u.list, &old_probes);
	}
}

/**
 * tracepoint_probe_register_noupdate - register a probe but do not connect it
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * The caller must then call tracepoint_probe_update_all().
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe,
				       void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	pthread_mutex_unlock(&tracepoints_mutex);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but do not disconnect it
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * The caller must then call tracepoint_probe_update_all().
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
					 void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	pthread_mutex_unlock(&tracepoints_mutex);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update tracepoints
 */
void tracepoint_probe_update_all(void)
{
	CDS_LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	pthread_mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return;
	}
	if (!cds_list_empty(&old_probes))
		cds_list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	pthread_mutex_unlock(&tracepoints_mutex);

	tracepoint_update_probes();
	cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		cds_list_del(&pos->u.list);
//ust//		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
		synchronize_rcu();
		free(pos);
	}
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
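
/*
 * Usage sketch for the batched interface (added comment, not in the original
 * file): several probes can be queued with the _noupdate variants so that the
 * relatively expensive tracepoint_update_probes() pass runs once for the
 * whole batch, when tracepoint_probe_update_all() is called, instead of once
 * per probe.
 *
 *	tracepoint_probe_register_noupdate("event_a", probe_a, NULL);
 *	tracepoint_probe_register_noupdate("event_b", probe_b, NULL);
 *	tracepoint_probe_update_all();
 */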

/*
 * Returns 1 if the current tracepoint for this iterator was found, 0 otherwise.
 */
int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
{
	struct tracepoint_lib *iter_lib;
	int found = 0;

//ust//	pthread_mutex_lock(&module_mutex);
	cds_list_for_each_entry(iter_lib, &libs, list) {
		if (iter_lib < iter->lib)
			continue;
		else if (iter_lib > iter->lib)
			iter->tracepoint = NULL;
		found = tracepoint_get_iter_range(&iter->tracepoint,
			iter_lib->tracepoints_start,
			iter_lib->tracepoints_start + iter_lib->tracepoints_count);
		if (found) {
			iter->lib = iter_lib;
			break;
		}
	}
//ust//	pthread_mutex_unlock(&module_mutex);
	return found;
}

/**
 * tracepoint_get_iter_range - Get the next tracepoint given a range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
	struct tracepoint * const *begin, struct tracepoint * const *end)
{
	if (!*tracepoint && begin != end) {
		*tracepoint = begin;
		return 1;
	}
	if (*tracepoint >= begin && *tracepoint < end)
		return 1;
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);

static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

//ust//	/* Core kernel tracepoints */
//ust//	if (!iter->module) {
//ust//		found = tracepoint_get_iter_range(&iter->tracepoint,
//ust//			__start___tracepoints, __stop___tracepoints);
//ust//		if (found)
//ust//			goto end;
//ust//	}
	/* tracepoints in libs. */
	found = lib_get_iter_tracepoints(iter);
//ust// end:
	if (!found)
		tracepoint_iter_reset(iter);
}

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by marshalling on the tracepoints, getting the
	 * tracepoints from the following libraries if necessary.
	 */
	tracepoint_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
//ust//	iter->module = NULL;
	iter->tracepoint = NULL;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
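
/*
 * Iteration sketch (illustrative, not part of the original file): walking
 * every tracepoint known to the registered libraries. The iterator is
 * assumed to start zeroed so that iteration begins at the first tracepoint
 * of the first library.
 *
 *	struct tracepoint_iter iter;
 *
 *	memset(&iter, 0, sizeof(iter));
 *	for (tracepoint_iter_start(&iter); iter.tracepoint;
 *	     tracepoint_iter_next(&iter))
 *		DBG("tracepoint: %s", (*iter.tracepoint)->name);
 *	tracepoint_iter_stop(&iter);
 */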

//ust// #ifdef CONFIG_MODULES

//ust// int tracepoint_module_notify(struct notifier_block *self,
//ust//			     unsigned long val, void *data)
//ust// {
//ust//	struct module *mod = data;
//ust//
//ust//	switch (val) {
//ust//	case MODULE_STATE_COMING:
//ust//		tracepoint_update_probe_range(mod->tracepoints,
//ust//			mod->tracepoints + mod->num_tracepoints);
//ust//		break;
//ust//	case MODULE_STATE_GOING:
//ust//		tracepoint_update_probe_range(mod->tracepoints,
//ust//			mod->tracepoints + mod->num_tracepoints);
//ust//		break;
//ust//	}
//ust//	return 0;
//ust// }

//ust// struct notifier_block tracepoint_module_nb = {
//ust//	.notifier_call = tracepoint_module_notify,
//ust//	.priority = 0,
//ust// };

//ust// static int init_tracepoints(void)
//ust// {
//ust//	return register_module_notifier(&tracepoint_module_nb);
//ust// }
//ust// __initcall(init_tracepoints);

//ust// #endif /* CONFIG_MODULES */

static void (*new_tracepoint_cb)(struct tracepoint *) = NULL;

void tracepoint_set_new_tracepoint_cb(void (*cb)(struct tracepoint *))
{
	new_tracepoint_cb = cb;
}

static void new_tracepoints(struct tracepoint * const *start,
			    struct tracepoint * const *end)
{
	if (new_tracepoint_cb) {
		struct tracepoint * const *t;

		for (t = start; t < end; t++) {
			new_tracepoint_cb(*t);
		}
	}
}
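
/*
 * Callback sketch (illustrative, not part of the original file): a consumer
 * can ask to be notified of every tracepoint that becomes known as
 * instrumented libraries register their sections.
 *
 *	static void on_new_tracepoint(struct tracepoint *tp)
 *	{
 *		DBG("new tracepoint: %s", tp->name);
 *	}
 *
 *	tracepoint_set_new_tracepoint_cb(on_new_tracepoint);
 */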

int tracepoint_register_lib(struct tracepoint * const *tracepoints_start,
			    int tracepoints_count)
{
	struct tracepoint_lib *pl, *iter;

	pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
	if (!pl)
		return -ENOMEM;

	pl->tracepoints_start = tracepoints_start;
	pl->tracepoints_count = tracepoints_count;

	/* FIXME: maybe protect this with its own mutex? */
	pthread_mutex_lock(&tracepoints_mutex);
	/*
	 * We sort the libs by struct lib pointer address.
	 */
	cds_list_for_each_entry_reverse(iter, &libs, list) {
		BUG_ON(iter == pl);	/* Should never be in the list twice */
		if (iter < pl) {
			/* We belong to the location right after iter. */
			cds_list_add(&pl->list, &iter->list);
			goto lib_added;
		}
	}
	/* We should be added at the head of the list */
	cds_list_add(&pl->list, &libs);
lib_added:
	pthread_mutex_unlock(&tracepoints_mutex);

	new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);

	/* FIXME: update just the loaded lib */
	lib_update_tracepoints();

	DBG("just registered a tracepoints section from %p containing %d tracepoints",
	    tracepoints_start, tracepoints_count);

	return 0;
}
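
/*
 * Registration sketch (illustrative; the section symbol names below are
 * hypothetical, the real ones come from the tracepoint definition macros).
 * An instrumented library typically registers the bounds of its array of
 * tracepoint pointers from a constructor and unregisters them from a
 * destructor:
 *
 *	extern struct tracepoint * const __start_tracepoints_ptrs[];
 *	extern struct tracepoint * const __stop_tracepoints_ptrs[];
 *
 *	static void __attribute__((constructor)) __tp_init(void)
 *	{
 *		tracepoint_register_lib(__start_tracepoints_ptrs,
 *			__stop_tracepoints_ptrs - __start_tracepoints_ptrs);
 *	}
 *
 *	static void __attribute__((destructor)) __tp_fini(void)
 *	{
 *		tracepoint_unregister_lib(__start_tracepoints_ptrs);
 *	}
 */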

int tracepoint_unregister_lib(struct tracepoint * const *tracepoints_start)
{
	struct tracepoint_lib *lib;

	pthread_mutex_lock(&tracepoints_mutex);

	cds_list_for_each_entry(lib, &libs, list) {
		if (lib->tracepoints_start == tracepoints_start) {
			struct tracepoint_lib *lib2free = lib;
			cds_list_del(&lib->list);
			free(lib2free);
			break;
		}
	}

	pthread_mutex_unlock(&tracepoints_mutex);

	return 0;
}