Implement event notifier error counter
[lttng-modules.git] / src / lttng-events.c
... / ...
CommitLineData
1/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10/*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14#include "wrapper/page_alloc.h"
15
16#include <linux/module.h>
17#include <linux/mutex.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/jiffies.h>
21#include <linux/utsname.h>
22#include <linux/err.h>
23#include <linux/seq_file.h>
24#include <linux/file.h>
25#include <linux/anon_inodes.h>
26#include <wrapper/file.h>
27#include <linux/uaccess.h>
28#include <linux/vmalloc.h>
29#include <linux/dmi.h>
30
31#include <wrapper/uuid.h>
32#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33#include <wrapper/random.h>
34#include <wrapper/tracepoint.h>
35#include <wrapper/list.h>
36#include <wrapper/types.h>
37#include <lttng/kernel-version.h>
38#include <lttng/events.h>
39#include <lttng/lttng-bytecode.h>
40#include <lttng/tracer.h>
41#include <lttng/event-notifier-notification.h>
42#include <lttng/abi-old.h>
43#include <lttng/endian.h>
44#include <lttng/string-utils.h>
45#include <lttng/utils.h>
46#include <ringbuffer/backend.h>
47#include <ringbuffer/frontend.h>
48#include <wrapper/time.h>
49
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* Global registries of sessions, notifier groups and transports. */
static LIST_HEAD(sessions);
static LIST_HEAD(event_notifier_groups);
static LIST_HEAD(lttng_transport_list);
static LIST_HEAD(lttng_counter_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab caches backing event and event notifier object allocations. */
static struct kmem_cache *event_cache;
static struct kmem_cache *event_notifier_cache;

/* Enabler synchronization and destruction helpers (defined below). */
static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
static void lttng_session_sync_event_enablers(struct lttng_session *session);
static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);

/* Event/channel teardown helpers. */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
/* Metadata statedump helpers (emit trace description to the metadata cache). */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
90
/*
 * Wait for a grace period so that no tracing probe is still executing.
 * The primitive to use depends on kernel version and preemption model:
 * synchronize_sched() was merged into synchronize_rcu() in 5.1, and
 * PREEMPT_RT kernels additionally need an RCU grace period.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
109
/* Take the global sessions mutex (exported locking entry point). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
114
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
119
120static struct lttng_transport *lttng_transport_find(const char *name)
121{
122 struct lttng_transport *transport;
123
124 list_for_each_entry(transport, &lttng_transport_list, node) {
125 if (!strcmp(transport->name, name))
126 return transport;
127 }
128 return NULL;
129}
130
131/*
132 * Called with sessions lock held.
133 */
134int lttng_session_active(void)
135{
136 struct lttng_session *iter;
137
138 list_for_each_entry(iter, &sessions, list) {
139 if (iter->active)
140 return 1;
141 }
142 return 0;
143}
144
/*
 * Create a tracing session and its metadata cache, then publish it on
 * the global sessions list. Returns NULL on allocation failure.
 * The metadata cache is refcounted: it may outlive the session while
 * metadata streams still reference it.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	lttng_guid_gen(&session->uuid);

	/* Backing storage for the CTF metadata description. */
	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* Cache keeps a copy of the session UUID for the metadata header. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Wire up the per-session ID trackers (pid/uid/gid and v* variants). */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
200
201static
202struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
203{
204 struct lttng_counter_transport *transport;
205
206 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
207 if (!strcmp(transport->name, name))
208 return transport;
209 }
210 return NULL;
211}
212
213struct lttng_counter *lttng_kernel_counter_create(
214 const char *counter_transport_name,
215 size_t number_dimensions, const size_t *dimensions_sizes)
216{
217 struct lttng_counter *counter = NULL;
218 struct lttng_counter_transport *counter_transport = NULL;
219
220 counter_transport = lttng_counter_transport_find(counter_transport_name);
221 if (!counter_transport) {
222 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
223 counter_transport_name);
224 goto notransport;
225 }
226 if (!try_module_get(counter_transport->owner)) {
227 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
228 goto notransport;
229 }
230
231 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
232 if (!counter)
233 goto nomem;
234
235 /* Create event notifier error counter. */
236 counter->ops = &counter_transport->ops;
237 counter->transport = counter_transport;
238
239 counter->counter = counter->ops->counter_create(
240 number_dimensions, dimensions_sizes, 0);
241 if (!counter->counter) {
242 goto create_error;
243 }
244
245 return counter;
246
247create_error:
248 lttng_kvfree(counter);
249nomem:
250 if (counter_transport)
251 module_put(counter_transport->owner);
252notransport:
253 return NULL;
254}
255
/*
 * Create an event notifier group with its notification ring buffer
 * channel, and publish it on the global group list.
 * Returns NULL on failure.
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Hold the transport module while the group uses its channel. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
320
321void metadata_cache_destroy(struct kref *kref)
322{
323 struct lttng_metadata_cache *cache =
324 container_of(kref, struct lttng_metadata_cache, refcount);
325 vfree(cache->data);
326 kfree(cache);
327}
328
/*
 * Tear down a tracing session. Order matters:
 * 1) deactivate and unregister probes, 2) wait a grace period so no
 * probe is still running, 3) free events, channels and trackers, then
 * 4) drop the metadata cache reference and unlink the session.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed separately on fd release. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake up readers blocked on metadata streams of this session. */
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
376
/*
 * Tear down an event notifier group: unregister all notifiers, wait a
 * grace period, destroy enablers and notifiers, release the error
 * counter (if any) and the notification channel, then unlink the group.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	/* Flush any pending reader wakeup before freeing the group. */
	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	/* Release the error counter created for this group, if present. */
	if (event_notifier_group->error_counter) {
		struct lttng_counter *error_counter = event_notifier_group->error_counter;
		error_counter->ops->counter_destroy(error_counter->counter);
		module_put(error_counter->transport->owner);
		lttng_kvfree(error_counter);
		event_notifier_group->error_counter = NULL;
	}

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
428
/*
 * Trigger a statedump for the given session, serialized by the
 * sessions mutex. Returns the statedump start result.
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
438
/*
 * Activate a session: sync enablers, pick per-channel event header
 * layout, clear stream quiescence, dump metadata and start a statedump.
 * Returns -EBUSY if the session is already active.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Fewer than 31 event IDs fit the compact header encoding. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	/* Roll back activation if the metadata dump or statedump fails. */
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
489
/*
 * Deactivate a session: mark it inactive, sync enablers to the
 * "disabled" state, and quiesce each data stream.
 * Returns -EBUSY if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
515
/*
 * Regenerate the session metadata from scratch: wipe the metadata
 * cache, bump its version, reset stream positions and per-object
 * "dumped" flags, then redo a full metadata statedump.
 * Returns -EBUSY if the session is not active.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	/* Restart every metadata stream from the beginning of the cache. */
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force session, channels and events to be dumped again. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
555
/*
 * Enable a data channel. Metadata channels cannot be toggled (-EPERM);
 * enabling an already-enabled channel returns -EEXIST.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
578
/*
 * Disable a data channel. Metadata channels cannot be toggled (-EPERM);
 * disabling an already-disabled channel returns -EEXIST.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
601
/*
 * Enable a single event. Tracepoint and syscall events are controlled
 * through enablers rather than directly, hence -EINVAL for those.
 * Events in the metadata channel cannot be toggled (-EPERM).
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Managed by enabler sync, not directly. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes pair entry/exit events; helper updates both. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
637
/*
 * Disable a single event. Mirror of lttng_event_enable(): tracepoint
 * and syscall events are controlled through enablers (-EINVAL), and
 * metadata channel events cannot be toggled (-EPERM).
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Managed by enabler sync, not directly. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes pair entry/exit events; helper updates both. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
673
/*
 * Enable an event notifier. Tracepoint and syscall notifiers are
 * controlled through enablers (-EINVAL); only kprobe/uprobe notifiers
 * can be toggled directly.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Managed by enabler sync, not directly. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		/* Unsupported instrumentation types for notifiers. */
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
703
/*
 * Disable an event notifier. Mirror of lttng_event_notifier_enable().
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* Managed by enabler sync, not directly. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		/* Unsupported instrumentation types for notifiers. */
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
733
/*
 * Create a channel in a session using the named transport.
 * Refuses to add non-metadata channels to a session that has already
 * been active. Takes a reference on the transport module for the
 * channel's lifetime. Returns NULL on failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	/* New channels start enabled, both transiently and atomically. */
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
792
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 * Destroys the transport channel first, then releases the transport
 * module reference, unlinks the channel and frees its context and
 * structure.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
807
/*
 * Destroy a metadata channel. Exported so the file release path can
 * call it; non-metadata channels must go through session destroy.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
818
/*
 * Mark a metadata stream finalized and wake up any blocked reader so it
 * can observe the hangup.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
825
826
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an event of the given instrumentation type in a channel,
 * registers the underlying probe where applicable, dumps the event
 * metadata, and links the event into the session's hash table and list.
 * Returns the new event or an ERR_PTR on failure.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Channel event ID space exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Pick the event name source depending on instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Refuse duplicate (name, channel) pairs. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe provider module while the event exists. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* Map ABI entry/exit selector; "all" is invalid here. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		/* Map syscall ABI selector; "all" is invalid here. */
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	/* Describe the new event in the session metadata. */
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
1065
1066struct lttng_event_notifier *_lttng_event_notifier_create(
1067 const struct lttng_event_desc *event_desc,
1068 uint64_t token, uint64_t error_counter_index,
1069 struct lttng_event_notifier_group *event_notifier_group,
1070 struct lttng_kernel_event_notifier *event_notifier_param,
1071 void *filter, enum lttng_kernel_instrumentation itype)
1072{
1073 struct lttng_event_notifier *event_notifier;
1074 const char *event_name;
1075 struct hlist_head *head;
1076 int ret;
1077
1078 switch (itype) {
1079 case LTTNG_KERNEL_TRACEPOINT:
1080 event_name = event_desc->name;
1081 break;
1082 case LTTNG_KERNEL_KPROBE:
1083 case LTTNG_KERNEL_UPROBE:
1084 case LTTNG_KERNEL_SYSCALL:
1085 event_name = event_notifier_param->event.name;
1086 break;
1087 case LTTNG_KERNEL_KRETPROBE:
1088 case LTTNG_KERNEL_FUNCTION:
1089 case LTTNG_KERNEL_NOOP:
1090 default:
1091 WARN_ON_ONCE(1);
1092 ret = -EINVAL;
1093 goto type_error;
1094 }
1095
1096 head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
1097 LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
1098 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
1099 WARN_ON_ONCE(!event_notifier->desc);
1100 if (!strncmp(event_notifier->desc->name, event_name,
1101 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1102 && event_notifier_group == event_notifier->group
1103 && token == event_notifier->user_token) {
1104 ret = -EEXIST;
1105 goto exist;
1106 }
1107 }
1108
1109 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
1110 if (!event_notifier) {
1111 ret = -ENOMEM;
1112 goto cache_error;
1113 }
1114
1115 event_notifier->group = event_notifier_group;
1116 event_notifier->user_token = token;
1117 event_notifier->error_counter_index = error_counter_index;
1118 event_notifier->num_captures = 0;
1119 event_notifier->filter = filter;
1120 event_notifier->instrumentation = itype;
1121 event_notifier->evtype = LTTNG_TYPE_EVENT;
1122 event_notifier->send_notification = lttng_event_notifier_notification_send;
1123 INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
1124 INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
1125 INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
1126
1127 switch (itype) {
1128 case LTTNG_KERNEL_TRACEPOINT:
1129 /* Event will be enabled by enabler sync. */
1130 event_notifier->enabled = 0;
1131 event_notifier->registered = 0;
1132 event_notifier->desc = lttng_event_desc_get(event_name);
1133 if (!event_notifier->desc) {
1134 ret = -ENOENT;
1135 goto register_error;
1136 }
1137 /* Populate lttng_event_notifier structure before event registration. */
1138 smp_wmb();
1139 break;
1140 case LTTNG_KERNEL_KPROBE:
1141 /*
1142 * Needs to be explicitly enabled after creation, since
1143 * we may want to apply filters.
1144 */
1145 event_notifier->enabled = 0;
1146 event_notifier->registered = 1;
1147 /*
1148 * Populate lttng_event_notifier structure before event
1149 * registration.
1150 */
1151 smp_wmb();
1152 ret = lttng_kprobes_register_event_notifier(
1153 event_notifier_param->event.u.kprobe.symbol_name,
1154 event_notifier_param->event.u.kprobe.offset,
1155 event_notifier_param->event.u.kprobe.addr,
1156 event_notifier);
1157 if (ret) {
1158 ret = -EINVAL;
1159 goto register_error;
1160 }
1161 ret = try_module_get(event_notifier->desc->owner);
1162 WARN_ON_ONCE(!ret);
1163 break;
1164 case LTTNG_KERNEL_NOOP:
1165 case LTTNG_KERNEL_SYSCALL:
1166 /*
1167 * Needs to be explicitly enabled after creation, since
1168 * we may want to apply filters.
1169 */
1170 event_notifier->enabled = 0;
1171 event_notifier->registered = 0;
1172 event_notifier->desc = event_desc;
1173 switch (event_notifier_param->event.u.syscall.entryexit) {
1174 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1175 ret = -EINVAL;
1176 goto register_error;
1177 case LTTNG_KERNEL_SYSCALL_ENTRY:
1178 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1179 break;
1180 case LTTNG_KERNEL_SYSCALL_EXIT:
1181 event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1182 break;
1183 }
1184 switch (event_notifier_param->event.u.syscall.abi) {
1185 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1186 ret = -EINVAL;
1187 goto register_error;
1188 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1189 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1190 break;
1191 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1192 event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1193 break;
1194 }
1195
1196 if (!event_notifier->desc) {
1197 ret = -EINVAL;
1198 goto register_error;
1199 }
1200 break;
1201 case LTTNG_KERNEL_UPROBE:
1202 /*
1203 * Needs to be explicitly enabled after creation, since
1204 * we may want to apply filters.
1205 */
1206 event_notifier->enabled = 0;
1207 event_notifier->registered = 1;
1208
1209 /*
1210 * Populate lttng_event_notifier structure before
1211 * event_notifier registration.
1212 */
1213 smp_wmb();
1214
1215 ret = lttng_uprobes_register_event_notifier(
1216 event_notifier_param->event.name,
1217 event_notifier_param->event.u.uprobe.fd,
1218 event_notifier);
1219 if (ret)
1220 goto register_error;
1221 ret = try_module_get(event_notifier->desc->owner);
1222 WARN_ON_ONCE(!ret);
1223 break;
1224 case LTTNG_KERNEL_KRETPROBE:
1225 case LTTNG_KERNEL_FUNCTION:
1226 default:
1227 WARN_ON_ONCE(1);
1228 ret = -EINVAL;
1229 goto register_error;
1230 }
1231
1232 list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
1233 hlist_add_head(&event_notifier->hlist, head);
1234
1235 /*
1236 * Clear the error counter bucket. The sessiond keeps track of which
1237 * bucket is currently in use. We trust it.
1238 */
1239 if (event_notifier_group->error_counter) {
1240 size_t dimension_index[1];
1241
1242 /*
1243 * Check that the index is within the boundary of the counter.
1244 */
1245 if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) {
1246 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1247 event_notifier_group->error_counter_len, event_notifier->error_counter_index);
1248 ret = -EINVAL;
1249 goto register_error;
1250 }
1251
1252 dimension_index[0] = event_notifier->error_counter_index;
1253 ret = event_notifier_group->error_counter->ops->counter_clear(
1254 event_notifier_group->error_counter->counter,
1255 dimension_index);
1256 if (ret) {
1257 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1258 event_notifier->error_counter_index);
1259 goto register_error;
1260 }
1261 }
1262
1263 return event_notifier;
1264
1265register_error:
1266 kmem_cache_free(event_notifier_cache, event_notifier);
1267cache_error:
1268exist:
1269type_error:
1270 return ERR_PTR(ret);
1271}
1272
1273int lttng_kernel_counter_read(struct lttng_counter *counter,
1274 const size_t *dim_indexes, int32_t cpu,
1275 int64_t *val, bool *overflow, bool *underflow)
1276{
1277 return counter->ops->counter_read(counter->counter, dim_indexes,
1278 cpu, val, overflow, underflow);
1279}
1280
1281int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1282 const size_t *dim_indexes, int64_t *val,
1283 bool *overflow, bool *underflow)
1284{
1285 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1286 val, overflow, underflow);
1287}
1288
1289int lttng_kernel_counter_clear(struct lttng_counter *counter,
1290 const size_t *dim_indexes)
1291{
1292 return counter->ops->counter_clear(counter->counter, dim_indexes);
1293}
1294
1295struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1296 struct lttng_kernel_event *event_param,
1297 void *filter,
1298 const struct lttng_event_desc *event_desc,
1299 enum lttng_kernel_instrumentation itype)
1300{
1301 struct lttng_event *event;
1302
1303 mutex_lock(&sessions_mutex);
1304 event = _lttng_event_create(chan, event_param, filter, event_desc,
1305 itype);
1306 mutex_unlock(&sessions_mutex);
1307 return event;
1308}
1309
1310struct lttng_event_notifier *lttng_event_notifier_create(
1311 const struct lttng_event_desc *event_desc,
1312 uint64_t id, uint64_t error_counter_index,
1313 struct lttng_event_notifier_group *event_notifier_group,
1314 struct lttng_kernel_event_notifier *event_notifier_param,
1315 void *filter, enum lttng_kernel_instrumentation itype)
1316{
1317 struct lttng_event_notifier *event_notifier;
1318
1319 mutex_lock(&sessions_mutex);
1320 event_notifier = _lttng_event_notifier_create(event_desc, id,
1321 error_counter_index, event_notifier_group,
1322 event_notifier_param, filter, itype);
1323 mutex_unlock(&sessions_mutex);
1324 return event_notifier;
1325}
1326
/*
 * Attach the event's probe to its instrumentation source.
 * Only used for tracepoints for now: kprobe/uprobe/kretprobe events are
 * registered at creation time, so they are no-ops here.
 * Idempotent: does nothing if already registered.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Hook the probe callback onto the kernel tracepoint. */
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable_event(event->chan, event);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
		/* Registered at event creation time: nothing to do. */
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	/* Only mark registered on success; a failure leaves state unchanged. */
	if (!ret)
		event->registered = 1;
}
1360
1361/*
1362 * Only used internally at session destruction.
1363 */
1364int _lttng_event_unregister(struct lttng_event *event)
1365{
1366 const struct lttng_event_desc *desc;
1367 int ret = -EINVAL;
1368
1369 if (!event->registered)
1370 return 0;
1371
1372 desc = event->desc;
1373 switch (event->instrumentation) {
1374 case LTTNG_KERNEL_TRACEPOINT:
1375 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1376 event->desc->probe_callback,
1377 event);
1378 break;
1379 case LTTNG_KERNEL_KPROBE:
1380 lttng_kprobes_unregister_event(event);
1381 ret = 0;
1382 break;
1383 case LTTNG_KERNEL_KRETPROBE:
1384 lttng_kretprobes_unregister(event);
1385 ret = 0;
1386 break;
1387 case LTTNG_KERNEL_SYSCALL:
1388 ret = lttng_syscall_filter_disable_event(event->chan, event);
1389 break;
1390 case LTTNG_KERNEL_NOOP:
1391 ret = 0;
1392 break;
1393 case LTTNG_KERNEL_UPROBE:
1394 lttng_uprobes_unregister_event(event);
1395 ret = 0;
1396 break;
1397 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1398 default:
1399 WARN_ON_ONCE(1);
1400 }
1401 if (!ret)
1402 event->registered = 0;
1403 return ret;
1404}
1405
/*
 * Attach the event notifier's probe to its instrumentation source.
 * Only used for tracepoints for now: kprobe/uprobe notifiers are
 * registered at creation time, so they are no-ops here.
 * Idempotent: does nothing if already registered.
 */
static
void register_event_notifier(struct lttng_event_notifier *event_notifier)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event_notifier->registered)
		return;

	desc = event_notifier->desc;
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Hook the notifier callback onto the kernel tracepoint. */
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->event_notifier_callback,
						  event_notifier);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		/* Registered at notifier creation time: nothing to do. */
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
	}
	/* Only mark registered on success. */
	if (!ret)
		event_notifier->registered = 1;
}
1439
/*
 * Detach the event notifier's probe from its instrumentation source.
 * Returns 0 on success (and if the notifier was not registered), or a
 * negative error code from the underlying unregistration.
 */
static
int _lttng_event_notifier_unregister(
		struct lttng_event_notifier *event_notifier)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (!event_notifier->registered)
		return 0;

	desc = event_notifier->desc;
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
						  event_notifier->desc->event_notifier_callback,
						  event_notifier);
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister_event_notifier(event_notifier);
		ret = 0;
		break;
	case LTTNG_KERNEL_UPROBE:
		lttng_uprobes_unregister_event_notifier(event_notifier);
		ret = 0;
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
	}
	/* Only clear the flag on success so a retry remains possible. */
	if (!ret)
		event_notifier->registered = 0;
	return ret;
}
1478
1479/*
1480 * Only used internally at session destruction.
1481 */
1482static
1483void _lttng_event_destroy(struct lttng_event *event)
1484{
1485 switch (event->instrumentation) {
1486 case LTTNG_KERNEL_TRACEPOINT:
1487 lttng_event_desc_put(event->desc);
1488 break;
1489 case LTTNG_KERNEL_KPROBE:
1490 module_put(event->desc->owner);
1491 lttng_kprobes_destroy_event_private(event);
1492 break;
1493 case LTTNG_KERNEL_KRETPROBE:
1494 module_put(event->desc->owner);
1495 lttng_kretprobes_destroy_private(event);
1496 break;
1497 case LTTNG_KERNEL_NOOP:
1498 case LTTNG_KERNEL_SYSCALL:
1499 break;
1500 case LTTNG_KERNEL_UPROBE:
1501 module_put(event->desc->owner);
1502 lttng_uprobes_destroy_event_private(event);
1503 break;
1504 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1505 default:
1506 WARN_ON_ONCE(1);
1507 }
1508 list_del(&event->list);
1509 lttng_destroy_context(event->ctx);
1510 kmem_cache_free(event_cache, event);
1511}
1512
1513/*
1514 * Only used internally at session destruction.
1515 */
1516static
1517void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
1518{
1519 switch (event_notifier->instrumentation) {
1520 case LTTNG_KERNEL_TRACEPOINT:
1521 lttng_event_desc_put(event_notifier->desc);
1522 break;
1523 case LTTNG_KERNEL_KPROBE:
1524 module_put(event_notifier->desc->owner);
1525 lttng_kprobes_destroy_event_notifier_private(event_notifier);
1526 break;
1527 case LTTNG_KERNEL_NOOP:
1528 case LTTNG_KERNEL_SYSCALL:
1529 break;
1530 case LTTNG_KERNEL_UPROBE:
1531 module_put(event_notifier->desc->owner);
1532 lttng_uprobes_destroy_event_notifier_private(event_notifier);
1533 break;
1534 case LTTNG_KERNEL_KRETPROBE:
1535 case LTTNG_KERNEL_FUNCTION:
1536 default:
1537 WARN_ON_ONCE(1);
1538 }
1539 list_del(&event_notifier->list);
1540 kmem_cache_free(event_notifier_cache, event_notifier);
1541}
1542
1543struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1544 enum tracker_type tracker_type)
1545{
1546 switch (tracker_type) {
1547 case TRACKER_PID:
1548 return &session->pid_tracker;
1549 case TRACKER_VPID:
1550 return &session->vpid_tracker;
1551 case TRACKER_UID:
1552 return &session->uid_tracker;
1553 case TRACKER_VUID:
1554 return &session->vuid_tracker;
1555 case TRACKER_GID:
1556 return &session->gid_tracker;
1557 case TRACKER_VGID:
1558 return &session->vgid_tracker;
1559 default:
1560 WARN_ON_ONCE(1);
1561 return NULL;
1562 }
1563}
1564
1565int lttng_session_track_id(struct lttng_session *session,
1566 enum tracker_type tracker_type, int id)
1567{
1568 struct lttng_id_tracker *tracker;
1569 int ret;
1570
1571 tracker = get_tracker(session, tracker_type);
1572 if (!tracker)
1573 return -EINVAL;
1574 if (id < -1)
1575 return -EINVAL;
1576 mutex_lock(&sessions_mutex);
1577 if (id == -1) {
1578 /* track all ids: destroy tracker. */
1579 lttng_id_tracker_destroy(tracker, true);
1580 ret = 0;
1581 } else {
1582 ret = lttng_id_tracker_add(tracker, id);
1583 }
1584 mutex_unlock(&sessions_mutex);
1585 return ret;
1586}
1587
1588int lttng_session_untrack_id(struct lttng_session *session,
1589 enum tracker_type tracker_type, int id)
1590{
1591 struct lttng_id_tracker *tracker;
1592 int ret;
1593
1594 tracker = get_tracker(session, tracker_type);
1595 if (!tracker)
1596 return -EINVAL;
1597 if (id < -1)
1598 return -EINVAL;
1599 mutex_lock(&sessions_mutex);
1600 if (id == -1) {
1601 /* untrack all ids: replace by empty tracker. */
1602 ret = lttng_id_tracker_empty_set(tracker);
1603 } else {
1604 ret = lttng_id_tracker_del(tracker, id);
1605 }
1606 mutex_unlock(&sessions_mutex);
1607 return ret;
1608}
1609
/*
 * seq_file start operation: take sessions_mutex (released in
 * id_list_stop()) and return the *pos-th tracked-id node, or NULL at
 * end of sequence.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Linear scan over all hash buckets up to position *pos. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			/*
			 * NOTE(review): id_tracker_p is NULL here, so this
			 * returns NULL, which seq_file treats as end of
			 * sequence; id_list_show() compares p against
			 * id_tracker_p to detect this case — confirm the
			 * disabled-tracker entry is rendered as intended.
			 */
			return id_tracker_p; /* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1638
/*
 * seq_file next operation: advance *ppos and return the node at the new
 * position, or NULL at end of sequence.
 * Called with sessions_mutex held (taken in id_list_start()).
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		/* Re-scan from the beginning up to the new position. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p; /* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1668
/*
 * seq_file stop operation: release the mutex taken in id_list_start().
 */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1674
1675static
1676int id_list_show(struct seq_file *m, void *p)
1677{
1678 struct lttng_id_tracker *id_tracker = m->private;
1679 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1680 int id;
1681
1682 if (p == id_tracker_p) {
1683 /* Tracker disabled. */
1684 id = -1;
1685 } else {
1686 const struct lttng_id_hash_node *e = p;
1687
1688 id = lttng_id_tracker_get_node_id(e);
1689 }
1690 switch (id_tracker->tracker_type) {
1691 case TRACKER_PID:
1692 seq_printf(m, "process { pid = %d; };\n", id);
1693 break;
1694 case TRACKER_VPID:
1695 seq_printf(m, "process { vpid = %d; };\n", id);
1696 break;
1697 case TRACKER_UID:
1698 seq_printf(m, "user { uid = %d; };\n", id);
1699 break;
1700 case TRACKER_VUID:
1701 seq_printf(m, "user { vuid = %d; };\n", id);
1702 break;
1703 case TRACKER_GID:
1704 seq_printf(m, "group { gid = %d; };\n", id);
1705 break;
1706 case TRACKER_VGID:
1707 seq_printf(m, "group { vgid = %d; };\n", id);
1708 break;
1709 default:
1710 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1711 }
1712 return 0;
1713}
1714
/* seq_file iterator for dumping the content of an id tracker. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1722
/*
 * Open operation: bind the tracker-id seq_file iterator to the file.
 * The tracker itself is attached to m->private by the caller
 * (lttng_session_list_tracker_ids()).
 */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1728
/*
 * Release operation: tear down the seq_file and drop the session file
 * reference taken in lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		/* Balance the f_count increment done at listing creation. */
		fput(id_tracker->session->file);
	return ret;
}
1742
/* File operations for the anonymous tracker-id listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1750
/*
 * Create an anonymous read-only file listing the ids of the given
 * session tracker, pin the session file for the lifetime of the
 * listing, and install it in a new fd.
 * Returns the new fd on success, negative error code otherwise.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file; dropped in the release operation. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	/* seq_open() was not called by anon_inode_getfile(): do it now. */
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	/* Hand the tracker to the seq_file iterator. */
	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1795
1796/*
1797 * Enabler management.
1798 */
1799static
1800int lttng_match_enabler_star_glob(const char *desc_name,
1801 const char *pattern)
1802{
1803 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1804 desc_name, LTTNG_SIZE_MAX))
1805 return 0;
1806 return 1;
1807}
1808
/*
 * Exact-name comparison between an event description name and an
 * enabler name. Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1817
/*
 * Decide whether an event description matches an enabler.
 * Returns 1 on match, 0 on mismatch, negative error code on invalid
 * enabler parameters.
 *
 * For syscalls, the probe-internal name prefixes ("compat_",
 * "syscall_entry_"/"syscall_exit_") are stripped before name matching,
 * and the enabler's entry/exit and ABI constraints are checked against
 * what the prefixes encode.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/* Strip the compat ABI prefix, remembering what it said. */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		/* Strip the entry/exit prefix, remembering what it said. */
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			/* Syscall event descriptions always carry a prefix. */
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Reject if the enabler targets the other direction. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Reject if the enabler targets the other ABI. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally match on the stripped name. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1904
1905static
1906int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1907 struct lttng_event *event)
1908{
1909 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1910 event_enabler);
1911
1912 if (base_enabler->event_param.instrumentation != event->instrumentation)
1913 return 0;
1914 if (lttng_desc_match_enabler(event->desc, base_enabler)
1915 && event->chan == event_enabler->chan)
1916 return 1;
1917 else
1918 return 0;
1919}
1920
1921static
1922int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
1923 struct lttng_event_notifier *event_notifier)
1924{
1925 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
1926 event_notifier_enabler);
1927
1928 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
1929 return 0;
1930 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler)
1931 && event_notifier->group == event_notifier_enabler->group
1932 && event_notifier->user_token == event_notifier_enabler->base.user_token)
1933 return 1;
1934 else
1935 return 0;
1936}
1937
1938static
1939struct lttng_enabler_ref *lttng_enabler_ref(
1940 struct list_head *enablers_ref_list,
1941 struct lttng_enabler *enabler)
1942{
1943 struct lttng_enabler_ref *enabler_ref;
1944
1945 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1946 if (enabler_ref->ref == enabler)
1947 return enabler_ref;
1948 }
1949 return NULL;
1950}
1951
1952static
1953void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
1954{
1955 struct lttng_session *session = event_enabler->chan->session;
1956 struct lttng_probe_desc *probe_desc;
1957 const struct lttng_event_desc *desc;
1958 int i;
1959 struct list_head *probe_list;
1960
1961 probe_list = lttng_get_probe_list_head();
1962 /*
1963 * For each probe event, if we find that a probe event matches
1964 * our enabler, create an associated lttng_event if not
1965 * already present.
1966 */
1967 list_for_each_entry(probe_desc, probe_list, head) {
1968 for (i = 0; i < probe_desc->nr_events; i++) {
1969 int found = 0;
1970 struct hlist_head *head;
1971 struct lttng_event *event;
1972
1973 desc = probe_desc->event_desc[i];
1974 if (!lttng_desc_match_enabler(desc,
1975 lttng_event_enabler_as_enabler(event_enabler)))
1976 continue;
1977
1978 /*
1979 * Check if already created.
1980 */
1981 head = utils_borrow_hash_table_bucket(
1982 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
1983 desc->name);
1984 lttng_hlist_for_each_entry(event, head, hlist) {
1985 if (event->desc == desc
1986 && event->chan == event_enabler->chan)
1987 found = 1;
1988 }
1989 if (found)
1990 continue;
1991
1992 /*
1993 * We need to create an event for this
1994 * event probe.
1995 */
1996 event = _lttng_event_create(event_enabler->chan,
1997 NULL, NULL, desc,
1998 LTTNG_KERNEL_TRACEPOINT);
1999 if (!event) {
2000 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2001 probe_desc->event_desc[i]->name);
2002 }
2003 }
2004 }
2005}
2006
2007static
2008void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2009{
2010 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2011 struct lttng_probe_desc *probe_desc;
2012 const struct lttng_event_desc *desc;
2013 int i;
2014 struct list_head *probe_list;
2015
2016 probe_list = lttng_get_probe_list_head();
2017 /*
2018 * For each probe event, if we find that a probe event matches
2019 * our enabler, create an associated lttng_event_notifier if not
2020 * already present.
2021 */
2022 list_for_each_entry(probe_desc, probe_list, head) {
2023 for (i = 0; i < probe_desc->nr_events; i++) {
2024 int found = 0;
2025 struct hlist_head *head;
2026 struct lttng_event_notifier *event_notifier;
2027
2028 desc = probe_desc->event_desc[i];
2029 if (!lttng_desc_match_enabler(desc,
2030 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
2031 continue;
2032
2033 /*
2034 * Check if already created.
2035 */
2036 head = utils_borrow_hash_table_bucket(
2037 event_notifier_group->event_notifiers_ht.table,
2038 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
2039 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
2040 if (event_notifier->desc == desc
2041 && event_notifier->user_token == event_notifier_enabler->base.user_token)
2042 found = 1;
2043 }
2044 if (found)
2045 continue;
2046
2047 /*
2048 * We need to create a event_notifier for this event probe.
2049 */
2050 event_notifier = _lttng_event_notifier_create(desc,
2051 event_notifier_enabler->base.user_token,
2052 event_notifier_enabler->error_counter_index,
2053 event_notifier_group, NULL, NULL,
2054 LTTNG_KERNEL_TRACEPOINT);
2055 if (IS_ERR(event_notifier)) {
2056 printk(KERN_INFO "Unable to create event_notifier %s\n",
2057 probe_desc->event_desc[i]->name);
2058 }
2059 }
2060 }
2061}
2062
/*
 * Register the syscall instrumentation for the enabler's channel; the
 * syscall layer creates the missing events itself. Failure is not
 * propagated (warn-once only), matching the best-effort semantics of
 * the tracepoint variant.
 */
static
void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
	WARN_ON_ONCE(ret);
}
2071
/*
 * Register the syscall instrumentation for the notifier group and
 * create the matching event notifiers. Failures are not propagated
 * (warn-once only).
 * NOTE(review): "lttng_syscals_..." (sic) is the symbol's actual name
 * in the syscall layer; it must not be "fixed" here alone.
 */
static
void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	int ret;

	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
	WARN_ON_ONCE(ret);
}
2082
2083/*
2084 * Create struct lttng_event if it is missing and present in the list of
2085 * tracepoint probes.
2086 * Should be called with sessions mutex held.
2087 */
2088static
2089void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2090{
2091 switch (event_enabler->base.event_param.instrumentation) {
2092 case LTTNG_KERNEL_TRACEPOINT:
2093 lttng_create_tracepoint_event_if_missing(event_enabler);
2094 break;
2095 case LTTNG_KERNEL_SYSCALL:
2096 lttng_create_syscall_event_if_missing(event_enabler);
2097 break;
2098 default:
2099 WARN_ON_ONCE(1);
2100 break;
2101 }
2102}
2103
2104/*
2105 * Create events associated with an event_enabler (if not already present),
2106 * and add backward reference from the event to the enabler.
2107 * Should be called with sessions mutex held.
2108 */
2109static
2110int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2111{
2112 struct lttng_channel *chan = event_enabler->chan;
2113 struct lttng_session *session = event_enabler->chan->session;
2114 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2115 struct lttng_event *event;
2116
2117 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
2118 base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
2119 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
2120 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
2121 !strcmp(base_enabler->event_param.name, "*")) {
2122 if (base_enabler->enabled)
2123 WRITE_ONCE(chan->syscall_all, 1);
2124 else
2125 WRITE_ONCE(chan->syscall_all, 0);
2126 }
2127
2128 /* First ensure that probe events are created for this enabler. */
2129 lttng_create_event_if_missing(event_enabler);
2130
2131 /* For each event matching event_enabler in session event list. */
2132 list_for_each_entry(event, &session->events, list) {
2133 struct lttng_enabler_ref *enabler_ref;
2134
2135 if (!lttng_event_enabler_match_event(event_enabler, event))
2136 continue;
2137 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
2138 lttng_event_enabler_as_enabler(event_enabler));
2139 if (!enabler_ref) {
2140 /*
2141 * If no backward ref, create it.
2142 * Add backward ref from event to event_enabler.
2143 */
2144 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2145 if (!enabler_ref)
2146 return -ENOMEM;
2147 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2148 list_add(&enabler_ref->node,
2149 &event->enablers_ref_head);
2150 }
2151
2152 /*
2153 * Link filter bytecodes if not linked yet.
2154 */
2155 lttng_enabler_link_bytecode(event->desc,
2156 lttng_static_ctx,
2157 &event->filter_bytecode_runtime_head,
2158 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2159
2160 /* TODO: merge event context. */
2161 }
2162 return 0;
2163}
2164
2165/*
2166 * Create struct lttng_event_notifier if it is missing and present in the list of
2167 * tracepoint probes.
2168 * Should be called with sessions mutex held.
2169 */
2170static
2171void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2172{
2173 switch (event_notifier_enabler->base.event_param.instrumentation) {
2174 case LTTNG_KERNEL_TRACEPOINT:
2175 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2176 break;
2177 case LTTNG_KERNEL_SYSCALL:
2178 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2179 break;
2180 default:
2181 WARN_ON_ONCE(1);
2182 break;
2183 }
2184}
2185
2186/*
2187 * Create event_notifiers associated with a event_notifier enabler (if not already present).
2188 */
2189static
2190int lttng_event_notifier_enabler_ref_event_notifiers(
2191 struct lttng_event_notifier_enabler *event_notifier_enabler)
2192{
2193 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2194 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2195 struct lttng_event_notifier *event_notifier;
2196
2197 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
2198 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
2199 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
2200 !strcmp(base_enabler->event_param.name, "*")) {
2201
2202 int enabled = base_enabler->enabled;
2203 enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2204
2205 if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2206 WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
2207
2208 if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2209 WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
2210
2211 }
2212
2213 /* First ensure that probe event_notifiers are created for this enabler. */
2214 lttng_create_event_notifier_if_missing(event_notifier_enabler);
2215
2216 /* Link the created event_notifier with its associated enabler. */
2217 list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
2218 struct lttng_enabler_ref *enabler_ref;
2219
2220 if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
2221 continue;
2222
2223 enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
2224 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2225 if (!enabler_ref) {
2226 /*
2227 * If no backward ref, create it.
2228 * Add backward ref from event_notifier to enabler.
2229 */
2230 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2231 if (!enabler_ref)
2232 return -ENOMEM;
2233
2234 enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
2235 event_notifier_enabler);
2236 list_add(&enabler_ref->node,
2237 &event_notifier->enablers_ref_head);
2238 }
2239
2240 /*
2241 * Link filter bytecodes if not linked yet.
2242 */
2243 lttng_enabler_link_bytecode(event_notifier->desc,
2244 lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
2245 &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
2246
2247 /* Link capture bytecodes if not linked yet. */
2248 lttng_enabler_link_bytecode(event_notifier->desc,
2249 lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
2250 &event_notifier_enabler->capture_bytecode_head);
2251
2252 event_notifier->num_captures = event_notifier_enabler->num_captures;
2253 }
2254 return 0;
2255}
2256
2257/*
2258 * Called at module load: connect the probe on all enablers matching
2259 * this event.
2260 * Called with sessions lock held.
2261 */
2262int lttng_fix_pending_events(void)
2263{
2264 struct lttng_session *session;
2265
2266 list_for_each_entry(session, &sessions, list)
2267 lttng_session_lazy_sync_event_enablers(session);
2268 return 0;
2269}
2270
2271static bool lttng_event_notifier_group_has_active_event_notifiers(
2272 struct lttng_event_notifier_group *event_notifier_group)
2273{
2274 struct lttng_event_notifier_enabler *event_notifier_enabler;
2275
2276 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2277 node) {
2278 if (event_notifier_enabler->base.enabled)
2279 return true;
2280 }
2281 return false;
2282}
2283
2284bool lttng_event_notifier_active(void)
2285{
2286 struct lttng_event_notifier_group *event_notifier_group;
2287
2288 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2289 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2290 return true;
2291 }
2292 return false;
2293}
2294
2295int lttng_fix_pending_event_notifiers(void)
2296{
2297 struct lttng_event_notifier_group *event_notifier_group;
2298
2299 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2300 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2301 return 0;
2302}
2303
2304struct lttng_event_enabler *lttng_event_enabler_create(
2305 enum lttng_enabler_format_type format_type,
2306 struct lttng_kernel_event *event_param,
2307 struct lttng_channel *chan)
2308{
2309 struct lttng_event_enabler *event_enabler;
2310
2311 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2312 if (!event_enabler)
2313 return NULL;
2314 event_enabler->base.format_type = format_type;
2315 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2316 memcpy(&event_enabler->base.event_param, event_param,
2317 sizeof(event_enabler->base.event_param));
2318 event_enabler->chan = chan;
2319 /* ctx left NULL */
2320 event_enabler->base.enabled = 0;
2321 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2322 mutex_lock(&sessions_mutex);
2323 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2324 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2325 mutex_unlock(&sessions_mutex);
2326 return event_enabler;
2327}
2328
2329int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2330{
2331 mutex_lock(&sessions_mutex);
2332 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2333 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2334 mutex_unlock(&sessions_mutex);
2335 return 0;
2336}
2337
2338int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2339{
2340 mutex_lock(&sessions_mutex);
2341 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2342 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2343 mutex_unlock(&sessions_mutex);
2344 return 0;
2345}
2346
2347static
2348int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2349 struct lttng_kernel_filter_bytecode __user *bytecode)
2350{
2351 struct lttng_bytecode_node *bytecode_node;
2352 uint32_t bytecode_len;
2353 int ret;
2354
2355 ret = get_user(bytecode_len, &bytecode->len);
2356 if (ret)
2357 return ret;
2358 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2359 GFP_KERNEL);
2360 if (!bytecode_node)
2361 return -ENOMEM;
2362 ret = copy_from_user(&bytecode_node->bc, bytecode,
2363 sizeof(*bytecode) + bytecode_len);
2364 if (ret)
2365 goto error_free;
2366
2367 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2368 bytecode_node->enabler = enabler;
2369 /* Enforce length based on allocated size */
2370 bytecode_node->bc.len = bytecode_len;
2371 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2372
2373 return 0;
2374
2375error_free:
2376 lttng_kvfree(bytecode_node);
2377 return ret;
2378}
2379
2380int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2381 struct lttng_kernel_filter_bytecode __user *bytecode)
2382{
2383 int ret;
2384 ret = lttng_enabler_attach_filter_bytecode(
2385 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2386 if (ret)
2387 goto error;
2388
2389 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2390 return 0;
2391
2392error:
2393 return ret;
2394}
2395
2396int lttng_event_add_callsite(struct lttng_event *event,
2397 struct lttng_kernel_event_callsite __user *callsite)
2398{
2399
2400 switch (event->instrumentation) {
2401 case LTTNG_KERNEL_UPROBE:
2402 return lttng_uprobes_event_add_callsite(event, callsite);
2403 default:
2404 return -EINVAL;
2405 }
2406}
2407
/* Per-event context attachment is not implemented: always -ENOSYS. */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2413
2414static
2415void lttng_enabler_destroy(struct lttng_enabler *enabler)
2416{
2417 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2418
2419 /* Destroy filter bytecode */
2420 list_for_each_entry_safe(filter_node, tmp_filter_node,
2421 &enabler->filter_bytecode_head, node) {
2422 lttng_kvfree(filter_node);
2423 }
2424}
2425
2426static
2427void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
2428{
2429 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
2430
2431 /* Destroy contexts */
2432 lttng_destroy_context(event_enabler->ctx);
2433
2434 list_del(&event_enabler->node);
2435 kfree(event_enabler);
2436}
2437
2438struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2439 struct lttng_event_notifier_group *event_notifier_group,
2440 enum lttng_enabler_format_type format_type,
2441 struct lttng_kernel_event_notifier *event_notifier_param)
2442{
2443 struct lttng_event_notifier_enabler *event_notifier_enabler;
2444
2445 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2446 if (!event_notifier_enabler)
2447 return NULL;
2448
2449 event_notifier_enabler->base.format_type = format_type;
2450 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2451 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2452
2453 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2454 event_notifier_enabler->num_captures = 0;
2455
2456 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2457 sizeof(event_notifier_enabler->base.event_param));
2458 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2459
2460 event_notifier_enabler->base.enabled = 0;
2461 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
2462 event_notifier_enabler->group = event_notifier_group;
2463
2464 mutex_lock(&sessions_mutex);
2465 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
2466 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2467
2468 mutex_unlock(&sessions_mutex);
2469
2470 return event_notifier_enabler;
2471}
2472
2473int lttng_event_notifier_enabler_enable(
2474 struct lttng_event_notifier_enabler *event_notifier_enabler)
2475{
2476 mutex_lock(&sessions_mutex);
2477 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2478 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2479 mutex_unlock(&sessions_mutex);
2480 return 0;
2481}
2482
2483int lttng_event_notifier_enabler_disable(
2484 struct lttng_event_notifier_enabler *event_notifier_enabler)
2485{
2486 mutex_lock(&sessions_mutex);
2487 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2488 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2489 mutex_unlock(&sessions_mutex);
2490 return 0;
2491}
2492
2493int lttng_event_notifier_enabler_attach_filter_bytecode(
2494 struct lttng_event_notifier_enabler *event_notifier_enabler,
2495 struct lttng_kernel_filter_bytecode __user *bytecode)
2496{
2497 int ret;
2498
2499 ret = lttng_enabler_attach_filter_bytecode(
2500 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
2501 bytecode);
2502 if (ret)
2503 goto error;
2504
2505 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2506 return 0;
2507
2508error:
2509 return ret;
2510}
2511
2512int lttng_event_notifier_enabler_attach_capture_bytecode(
2513 struct lttng_event_notifier_enabler *event_notifier_enabler,
2514 struct lttng_kernel_capture_bytecode __user *bytecode)
2515{
2516 struct lttng_bytecode_node *bytecode_node;
2517 struct lttng_enabler *enabler =
2518 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2519 uint32_t bytecode_len;
2520 int ret;
2521
2522 ret = get_user(bytecode_len, &bytecode->len);
2523 if (ret)
2524 return ret;
2525
2526 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2527 GFP_KERNEL);
2528 if (!bytecode_node)
2529 return -ENOMEM;
2530
2531 ret = copy_from_user(&bytecode_node->bc, bytecode,
2532 sizeof(*bytecode) + bytecode_len);
2533 if (ret)
2534 goto error_free;
2535
2536 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
2537 bytecode_node->enabler = enabler;
2538
2539 /* Enforce length based on allocated size */
2540 bytecode_node->bc.len = bytecode_len;
2541 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2542
2543 event_notifier_enabler->num_captures++;
2544
2545 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2546 goto end;
2547
2548error_free:
2549 lttng_kvfree(bytecode_node);
2550end:
2551 return ret;
2552}
2553
2554int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
2555 struct lttng_kernel_event_callsite __user *callsite)
2556{
2557
2558 switch (event_notifier->instrumentation) {
2559 case LTTNG_KERNEL_UPROBE:
2560 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
2561 callsite);
2562 default:
2563 return -EINVAL;
2564 }
2565}
2566
/* Context attachment on event notifiers is not implemented: always -ENOSYS. */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2573
2574static
2575void lttng_event_notifier_enabler_destroy(
2576 struct lttng_event_notifier_enabler *event_notifier_enabler)
2577{
2578 if (!event_notifier_enabler) {
2579 return;
2580 }
2581
2582 list_del(&event_notifier_enabler->node);
2583
2584 lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2585 kfree(event_notifier_enabler);
2586}
2587
2588/*
2589 * lttng_session_sync_event_enablers should be called just before starting a
2590 * session.
2591 * Should be called with sessions mutex held.
2592 */
2593static
2594void lttng_session_sync_event_enablers(struct lttng_session *session)
2595{
2596 struct lttng_event_enabler *event_enabler;
2597 struct lttng_event *event;
2598
2599 list_for_each_entry(event_enabler, &session->enablers_head, node)
2600 lttng_event_enabler_ref_events(event_enabler);
2601 /*
2602 * For each event, if at least one of its enablers is enabled,
2603 * and its channel and session transient states are enabled, we
2604 * enable the event, else we disable it.
2605 */
2606 list_for_each_entry(event, &session->events, list) {
2607 struct lttng_enabler_ref *enabler_ref;
2608 struct lttng_bytecode_runtime *runtime;
2609 int enabled = 0, has_enablers_without_bytecode = 0;
2610
2611 switch (event->instrumentation) {
2612 case LTTNG_KERNEL_TRACEPOINT:
2613 case LTTNG_KERNEL_SYSCALL:
2614 /* Enable events */
2615 list_for_each_entry(enabler_ref,
2616 &event->enablers_ref_head, node) {
2617 if (enabler_ref->ref->enabled) {
2618 enabled = 1;
2619 break;
2620 }
2621 }
2622 break;
2623 default:
2624 /* Not handled with lazy sync. */
2625 continue;
2626 }
2627 /*
2628 * Enabled state is based on union of enablers, with
2629 * intesection of session and channel transient enable
2630 * states.
2631 */
2632 enabled = enabled && session->tstate && event->chan->tstate;
2633
2634 WRITE_ONCE(event->enabled, enabled);
2635 /*
2636 * Sync tracepoint registration with event enabled
2637 * state.
2638 */
2639 if (enabled) {
2640 register_event(event);
2641 } else {
2642 _lttng_event_unregister(event);
2643 }
2644
2645 /* Check if has enablers without bytecode enabled */
2646 list_for_each_entry(enabler_ref,
2647 &event->enablers_ref_head, node) {
2648 if (enabler_ref->ref->enabled
2649 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2650 has_enablers_without_bytecode = 1;
2651 break;
2652 }
2653 }
2654 event->has_enablers_without_bytecode =
2655 has_enablers_without_bytecode;
2656
2657 /* Enable filters */
2658 list_for_each_entry(runtime,
2659 &event->filter_bytecode_runtime_head, node)
2660 lttng_bytecode_filter_sync_state(runtime);
2661 }
2662}
2663
2664/*
2665 * Apply enablers to session events, adding events to session if need
2666 * be. It is required after each modification applied to an active
2667 * session, and right before session "start".
2668 * "lazy" sync means we only sync if required.
2669 * Should be called with sessions mutex held.
2670 */
2671static
2672void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2673{
2674 /* We can skip if session is not active */
2675 if (!session->active)
2676 return;
2677 lttng_session_sync_event_enablers(session);
2678}
2679
/*
 * Synchronize the enabled state, registration, and bytecode state of
 * every event notifier in a group with the group's enablers.
 * Should be called with sessions mutex held.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* Make sure each enabler has its matching notifiers and backward refs. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		/* WRITE_ONCE pairs with lockless readers on the fast path. */
		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		/*
		 * An enabled enabler with no filter means the notifier fires
		 * unconditionally, regardless of filter results.
		 */
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);

		/* Enable captures */
		list_for_each_entry(runtime,
				&event_notifier->capture_bytecode_runtime_head, node)
			lttng_bytecode_capture_sync_state(runtime);
	}
}
2751
2752/*
2753 * Serialize at most one packet worth of metadata into a metadata
2754 * channel.
2755 * We grab the metadata cache mutex to get exclusive access to our metadata
2756 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2757 * allows us to do racy operations such as looking for remaining space left in
2758 * packet and write, since mutual exclusion protects us from concurrent writes.
2759 * Mutual exclusion on the metadata cache allow us to read the cache content
2760 * without racing against reallocation of the cache by updates.
2761 * Returns the number of bytes written in the channel, 0 if no data
2762 * was written and a negative value on error.
2763 */
2764int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2765 struct channel *chan, bool *coherent)
2766{
2767 struct lib_ring_buffer_ctx ctx;
2768 int ret = 0;
2769 size_t len, reserve_len;
2770
2771 /*
2772 * Ensure we support mutiple get_next / put sequences followed by
2773 * put_next. The metadata cache lock protects reading the metadata
2774 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2775 * "flush" operations on the buffer invoked by different processes.
2776 * Moreover, since the metadata cache memory can be reallocated, we
2777 * need to have exclusive access against updates even though we only
2778 * read it.
2779 */
2780 mutex_lock(&stream->metadata_cache->lock);
2781 WARN_ON(stream->metadata_in < stream->metadata_out);
2782 if (stream->metadata_in != stream->metadata_out)
2783 goto end;
2784
2785 /* Metadata regenerated, change the version. */
2786 if (stream->metadata_cache->version != stream->version)
2787 stream->version = stream->metadata_cache->version;
2788
2789 len = stream->metadata_cache->metadata_written -
2790 stream->metadata_in;
2791 if (!len)
2792 goto end;
2793 reserve_len = min_t(size_t,
2794 stream->transport->ops.packet_avail_size(chan),
2795 len);
2796 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
2797 sizeof(char), -1);
2798 /*
2799 * If reservation failed, return an error to the caller.
2800 */
2801 ret = stream->transport->ops.event_reserve(&ctx, 0);
2802 if (ret != 0) {
2803 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2804 stream->coherent = false;
2805 goto end;
2806 }
2807 stream->transport->ops.event_write(&ctx,
2808 stream->metadata_cache->data + stream->metadata_in,
2809 reserve_len);
2810 stream->transport->ops.event_commit(&ctx);
2811 stream->metadata_in += reserve_len;
2812 if (reserve_len < len)
2813 stream->coherent = false;
2814 else
2815 stream->coherent = true;
2816 ret = reserve_len;
2817
2818end:
2819 if (coherent)
2820 *coherent = stream->coherent;
2821 mutex_unlock(&stream->metadata_cache->lock);
2822 return ret;
2823}
2824
/*
 * Open a metadata transaction: take the cache lock on the first (outermost)
 * nested begin. The matching lttng_metadata_end() releases it when the
 * "producing" count drops back to zero, so begin/end pairs may nest.
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2831
/*
 * Close a metadata transaction opened by lttng_metadata_begin(). On the
 * outermost end (producing count reaches zero), wake up readers waiting on
 * each metadata stream and release the cache lock taken by begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2844
2845/*
2846 * Write the metadata to the metadata cache.
2847 * Must be called with sessions_mutex held.
2848 * The metadata cache lock protects us from concurrent read access from
2849 * thread outputting metadata content to ring buffer.
2850 * The content of the printf is printed as a single atomic metadata
2851 * transaction.
2852 */
2853int lttng_metadata_printf(struct lttng_session *session,
2854 const char *fmt, ...)
2855{
2856 char *str;
2857 size_t len;
2858 va_list ap;
2859
2860 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2861
2862 va_start(ap, fmt);
2863 str = kvasprintf(GFP_KERNEL, fmt, ap);
2864 va_end(ap);
2865 if (!str)
2866 return -ENOMEM;
2867
2868 len = strlen(str);
2869 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
2870 if (session->metadata_cache->metadata_written + len >
2871 session->metadata_cache->cache_alloc) {
2872 char *tmp_cache_realloc;
2873 unsigned int tmp_cache_alloc_size;
2874
2875 tmp_cache_alloc_size = max_t(unsigned int,
2876 session->metadata_cache->cache_alloc + len,
2877 session->metadata_cache->cache_alloc << 1);
2878 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2879 if (!tmp_cache_realloc)
2880 goto err;
2881 if (session->metadata_cache->data) {
2882 memcpy(tmp_cache_realloc,
2883 session->metadata_cache->data,
2884 session->metadata_cache->cache_alloc);
2885 vfree(session->metadata_cache->data);
2886 }
2887
2888 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2889 session->metadata_cache->data = tmp_cache_realloc;
2890 }
2891 memcpy(session->metadata_cache->data +
2892 session->metadata_cache->metadata_written,
2893 str, len);
2894 session->metadata_cache->metadata_written += len;
2895 kfree(str);
2896
2897 return 0;
2898
2899err:
2900 kfree(str);
2901 return -ENOMEM;
2902}
2903
/*
 * Emit one indentation unit per nesting level into the session metadata.
 * Returns 0 on success or the first lttng_metadata_printf() error.
 */
static
int print_tabs(struct lttng_session *session, size_t nesting)
{
	size_t i;

	for (i = 0; i < nesting; i++) {
		int ret;

		ret = lttng_metadata_printf(session, "	");
		if (ret) {
			return ret;
		}
	}
	return 0;
}
2919
/*
 * Emit a field's name terminator into the metadata, prefixed with '_'
 * (CTF identifier escaping). Must be called with sessions_mutex held.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2927
/*
 * Emit a CTF integer type declaration for the given type.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
		/* Only emit byte_order when it differs from the native order. */
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
2958
2959/*
2960 * Must be called with sessions_mutex held.
2961 */
2962static
2963int _lttng_struct_type_statedump(struct lttng_session *session,
2964 const struct lttng_type *type,
2965 size_t nesting)
2966{
2967 int ret;
2968 uint32_t i, nr_fields;
2969 unsigned int alignment;
2970
2971 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2972
2973 ret = print_tabs(session, nesting);
2974 if (ret)
2975 return ret;
2976 ret = lttng_metadata_printf(session,
2977 "struct {\n");
2978 if (ret)
2979 return ret;
2980 nr_fields = type->u.struct_nestable.nr_fields;
2981 for (i = 0; i < nr_fields; i++) {
2982 const struct lttng_event_field *iter_field;
2983
2984 iter_field = &type->u.struct_nestable.fields[i];
2985 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2986 if (ret)
2987 return ret;
2988 }
2989 ret = print_tabs(session, nesting);
2990 if (ret)
2991 return ret;
2992 alignment = type->u.struct_nestable.alignment;
2993 if (alignment) {
2994 ret = lttng_metadata_printf(session,
2995 "} align(%u)",
2996 alignment);
2997 } else {
2998 ret = lttng_metadata_printf(session,
2999 "}");
3000 }
3001 return ret;
3002}
3003
3004/*
3005 * Must be called with sessions_mutex held.
3006 */
3007static
3008int _lttng_struct_field_statedump(struct lttng_session *session,
3009 const struct lttng_event_field *field,
3010 size_t nesting)
3011{
3012 int ret;
3013
3014 ret = _lttng_struct_type_statedump(session,
3015 &field->type, nesting);
3016 if (ret)
3017 return ret;
3018 return lttng_field_name_statedump(session, field, nesting);
3019}
3020
3021/*
3022 * Must be called with sessions_mutex held.
3023 */
3024static
3025int _lttng_variant_type_statedump(struct lttng_session *session,
3026 const struct lttng_type *type,
3027 size_t nesting)
3028{
3029 int ret;
3030 uint32_t i, nr_choices;
3031
3032 WARN_ON_ONCE(type->atype != atype_variant_nestable);
3033 /*
3034 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3035 */
3036 if (type->u.variant_nestable.alignment != 0)
3037 return -EINVAL;
3038 ret = print_tabs(session, nesting);
3039 if (ret)
3040 return ret;
3041 ret = lttng_metadata_printf(session,
3042 "variant <_%s> {\n",
3043 type->u.variant_nestable.tag_name);
3044 if (ret)
3045 return ret;
3046 nr_choices = type->u.variant_nestable.nr_choices;
3047 for (i = 0; i < nr_choices; i++) {
3048 const struct lttng_event_field *iter_field;
3049
3050 iter_field = &type->u.variant_nestable.choices[i];
3051 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
3052 if (ret)
3053 return ret;
3054 }
3055 ret = print_tabs(session, nesting);
3056 if (ret)
3057 return ret;
3058 ret = lttng_metadata_printf(session,
3059 "}");
3060 return ret;
3061}
3062
3063/*
3064 * Must be called with sessions_mutex held.
3065 */
3066static
3067int _lttng_variant_field_statedump(struct lttng_session *session,
3068 const struct lttng_event_field *field,
3069 size_t nesting)
3070{
3071 int ret;
3072
3073 ret = _lttng_variant_type_statedump(session,
3074 &field->type, nesting);
3075 if (ret)
3076 return ret;
3077 return lttng_field_name_statedump(session, field, nesting);
3078}
3079
3080/*
3081 * Must be called with sessions_mutex held.
3082 */
3083static
3084int _lttng_array_field_statedump(struct lttng_session *session,
3085 const struct lttng_event_field *field,
3086 size_t nesting)
3087{
3088 int ret;
3089 const struct lttng_type *elem_type;
3090
3091 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
3092
3093 if (field->type.u.array_nestable.alignment) {
3094 ret = print_tabs(session, nesting);
3095 if (ret)
3096 return ret;
3097 ret = lttng_metadata_printf(session,
3098 "struct { } align(%u) _%s_padding;\n",
3099 field->type.u.array_nestable.alignment * CHAR_BIT,
3100 field->name);
3101 if (ret)
3102 return ret;
3103 }
3104 /*
3105 * Nested compound types: Only array of structures and variants are
3106 * currently supported.
3107 */
3108 elem_type = field->type.u.array_nestable.elem_type;
3109 switch (elem_type->atype) {
3110 case atype_integer:
3111 case atype_struct_nestable:
3112 case atype_variant_nestable:
3113 ret = _lttng_type_statedump(session, elem_type, nesting);
3114 if (ret)
3115 return ret;
3116 break;
3117
3118 default:
3119 return -EINVAL;
3120 }
3121 ret = lttng_metadata_printf(session,
3122 " _%s[%u];\n",
3123 field->name,
3124 field->type.u.array_nestable.length);
3125 return ret;
3126}
3127
3128/*
3129 * Must be called with sessions_mutex held.
3130 */
3131static
3132int _lttng_sequence_field_statedump(struct lttng_session *session,
3133 const struct lttng_event_field *field,
3134 size_t nesting)
3135{
3136 int ret;
3137 const char *length_name;
3138 const struct lttng_type *elem_type;
3139
3140 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
3141
3142 length_name = field->type.u.sequence_nestable.length_name;
3143
3144 if (field->type.u.sequence_nestable.alignment) {
3145 ret = print_tabs(session, nesting);
3146 if (ret)
3147 return ret;
3148 ret = lttng_metadata_printf(session,
3149 "struct { } align(%u) _%s_padding;\n",
3150 field->type.u.sequence_nestable.alignment * CHAR_BIT,
3151 field->name);
3152 if (ret)
3153 return ret;
3154 }
3155
3156 /*
3157 * Nested compound types: Only array of structures and variants are
3158 * currently supported.
3159 */
3160 elem_type = field->type.u.sequence_nestable.elem_type;
3161 switch (elem_type->atype) {
3162 case atype_integer:
3163 case atype_struct_nestable:
3164 case atype_variant_nestable:
3165 ret = _lttng_type_statedump(session, elem_type, nesting);
3166 if (ret)
3167 return ret;
3168 break;
3169
3170 default:
3171 return -EINVAL;
3172 }
3173 ret = lttng_metadata_printf(session,
3174 " _%s[ _%s ];\n",
3175 field->name,
3176 field->type.u.sequence_nestable.length_name);
3177 return ret;
3178}
3179
3180/*
3181 * Must be called with sessions_mutex held.
3182 */
/*
 * Dump the TSDL (CTF metadata) declaration of an enumeration type:
 * "enum : <container integer> { "label" = start [... end], ... }".
 * Must be called with sessions_mutex held.
 *
 * Returns 0 on success, a negative error code on failure (-EINVAL if
 * the container type is not an integer, or the error from the
 * underlying metadata printf).
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* CTF enumerations must be backed by an integer container type. */
	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	/* Emit the container integer declaration inline (no extra nesting). */
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		if (entry->options.is_auto) {
			/* Auto-assigned value: let the CTF reader number it. */
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			/* Print the range start with the matching signedness. */
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/*
			 * Single-value entry: start == end with identical
			 * signedness. Otherwise emit the "start ... end" range.
			 */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3295
3296/*
3297 * Must be called with sessions_mutex held.
3298 */
3299static
3300int _lttng_enum_field_statedump(struct lttng_session *session,
3301 const struct lttng_event_field *field,
3302 size_t nesting)
3303{
3304 int ret;
3305
3306 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3307 if (ret)
3308 return ret;
3309 return lttng_field_name_statedump(session, field, nesting);
3310}
3311
3312static
3313int _lttng_integer_field_statedump(struct lttng_session *session,
3314 const struct lttng_event_field *field,
3315 size_t nesting)
3316{
3317 int ret;
3318
3319 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3320 if (ret)
3321 return ret;
3322 return lttng_field_name_statedump(session, field, nesting);
3323}
3324
3325static
3326int _lttng_string_type_statedump(struct lttng_session *session,
3327 const struct lttng_type *type,
3328 size_t nesting)
3329{
3330 int ret;
3331
3332 WARN_ON_ONCE(type->atype != atype_string);
3333 /* Default encoding is UTF8 */
3334 ret = print_tabs(session, nesting);
3335 if (ret)
3336 return ret;
3337 ret = lttng_metadata_printf(session,
3338 "string%s",
3339 type->u.string.encoding == lttng_encode_ASCII ?
3340 " { encoding = ASCII; }" : "");
3341 return ret;
3342}
3343
3344static
3345int _lttng_string_field_statedump(struct lttng_session *session,
3346 const struct lttng_event_field *field,
3347 size_t nesting)
3348{
3349 int ret;
3350
3351 WARN_ON_ONCE(field->type.atype != atype_string);
3352 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3353 if (ret)
3354 return ret;
3355 return lttng_field_name_statedump(session, field, nesting);
3356}
3357
3358/*
3359 * Must be called with sessions_mutex held.
3360 */
3361static
3362int _lttng_type_statedump(struct lttng_session *session,
3363 const struct lttng_type *type,
3364 size_t nesting)
3365{
3366 int ret = 0;
3367
3368 switch (type->atype) {
3369 case atype_integer:
3370 ret = _lttng_integer_type_statedump(session, type, nesting);
3371 break;
3372 case atype_enum_nestable:
3373 ret = _lttng_enum_type_statedump(session, type, nesting);
3374 break;
3375 case atype_string:
3376 ret = _lttng_string_type_statedump(session, type, nesting);
3377 break;
3378 case atype_struct_nestable:
3379 ret = _lttng_struct_type_statedump(session, type, nesting);
3380 break;
3381 case atype_variant_nestable:
3382 ret = _lttng_variant_type_statedump(session, type, nesting);
3383 break;
3384
3385 /* Nested arrays and sequences are not supported yet. */
3386 case atype_array_nestable:
3387 case atype_sequence_nestable:
3388 default:
3389 WARN_ON_ONCE(1);
3390 return -EINVAL;
3391 }
3392 return ret;
3393}
3394
3395/*
3396 * Must be called with sessions_mutex held.
3397 */
3398static
3399int _lttng_field_statedump(struct lttng_session *session,
3400 const struct lttng_event_field *field,
3401 size_t nesting)
3402{
3403 int ret = 0;
3404
3405 switch (field->type.atype) {
3406 case atype_integer:
3407 ret = _lttng_integer_field_statedump(session, field, nesting);
3408 break;
3409 case atype_enum_nestable:
3410 ret = _lttng_enum_field_statedump(session, field, nesting);
3411 break;
3412 case atype_string:
3413 ret = _lttng_string_field_statedump(session, field, nesting);
3414 break;
3415 case atype_struct_nestable:
3416 ret = _lttng_struct_field_statedump(session, field, nesting);
3417 break;
3418 case atype_array_nestable:
3419 ret = _lttng_array_field_statedump(session, field, nesting);
3420 break;
3421 case atype_sequence_nestable:
3422 ret = _lttng_sequence_field_statedump(session, field, nesting);
3423 break;
3424 case atype_variant_nestable:
3425 ret = _lttng_variant_field_statedump(session, field, nesting);
3426 break;
3427
3428 default:
3429 WARN_ON_ONCE(1);
3430 return -EINVAL;
3431 }
3432 return ret;
3433}
3434
3435static
3436int _lttng_context_metadata_statedump(struct lttng_session *session,
3437 struct lttng_ctx *ctx)
3438{
3439 int ret = 0;
3440 int i;
3441
3442 if (!ctx)
3443 return 0;
3444 for (i = 0; i < ctx->nr_fields; i++) {
3445 const struct lttng_ctx_field *field = &ctx->fields[i];
3446
3447 ret = _lttng_field_statedump(session, &field->event_field, 2);
3448 if (ret)
3449 return ret;
3450 }
3451 return ret;
3452}
3453
3454static
3455int _lttng_fields_metadata_statedump(struct lttng_session *session,
3456 struct lttng_event *event)
3457{
3458 const struct lttng_event_desc *desc = event->desc;
3459 int ret = 0;
3460 int i;
3461
3462 for (i = 0; i < desc->nr_fields; i++) {
3463 const struct lttng_event_field *field = &desc->fields[i];
3464
3465 ret = _lttng_field_statedump(session, field, 2);
3466 if (ret)
3467 return ret;
3468 }
3469 return ret;
3470}
3471
3472/*
3473 * Must be called with sessions_mutex held.
3474 * The entire event metadata is printed as a single atomic metadata
3475 * transaction.
3476 */
/*
 * Dump the full "event { ... }" metadata declaration for one event.
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction (bracketed by lttng_metadata_begin/end).
 *
 * Idempotent: does nothing if the event was already dumped, if the
 * session is inactive, or if the owning channel is the metadata channel
 * (which has no event declarations of its own).
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context, wrapped in its own struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark as dumped once the whole declaration was emitted. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3544
3545/*
3546 * Must be called with sessions_mutex held.
3547 * The entire channel metadata is printed as a single atomic metadata
3548 * transaction.
3549 */
3550static
3551int _lttng_channel_metadata_statedump(struct lttng_session *session,
3552 struct lttng_channel *chan)
3553{
3554 int ret = 0;
3555
3556 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3557 return 0;
3558
3559 if (chan->channel_type == METADATA_CHANNEL)
3560 return 0;
3561
3562 lttng_metadata_begin(session);
3563
3564 WARN_ON_ONCE(!chan->header_type);
3565 ret = lttng_metadata_printf(session,
3566 "stream {\n"
3567 " id = %u;\n"
3568 " event.header := %s;\n"
3569 " packet.context := struct packet_context;\n",
3570 chan->id,
3571 chan->header_type == 1 ? "struct event_header_compact" :
3572 "struct event_header_large");
3573 if (ret)
3574 goto end;
3575
3576 if (chan->ctx) {
3577 ret = lttng_metadata_printf(session,
3578 " event.context := struct {\n");
3579 if (ret)
3580 goto end;
3581 }
3582 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3583 if (ret)
3584 goto end;
3585 if (chan->ctx) {
3586 ret = lttng_metadata_printf(session,
3587 " };\n");
3588 if (ret)
3589 goto end;
3590 }
3591
3592 ret = lttng_metadata_printf(session,
3593 "};\n\n");
3594
3595 chan->metadata_dumped = 1;
3596end:
3597 lttng_metadata_end(session);
3598 return ret;
3599}
3600
3601/*
3602 * Must be called with sessions_mutex held.
3603 */
/*
 * Declare the shared "struct packet_context" type referenced by every
 * stream declaration: per-packet begin/end timestamps, content and
 * packet sizes (in bits, per CTF), sequence number, discarded event
 * count and the CPU which produced the packet.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3619
3620/*
3621 * Compact header:
3622 * id: range: 0 - 30.
3623 * id 31 is reserved to indicate an extended header.
3624 *
3625 * Large header:
3626 * id: range: 0 - 65534.
3627 * id 65535 is reserved to indicate an extended header.
3628 *
3629 * Must be called with sessions_mutex held.
3630 */
/*
 * Declare the two event header variants used by the streams.
 *
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Both carry a variant selected by the id enum: the "compact" arm holds
 * a truncated timestamp, the "extended" arm a full 32-bit id and 64-bit
 * timestamp for events whose id overflows the compact range.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3664
3665 /*
3666 * Approximation of NTP time of day to clock monotonic correlation,
3667 * taken at start of trace.
3668 * Yes, this is only an approximation. Yes, we can (and will) do better
3669 * in future versions.
3670 * This function may return a negative offset. It may happen if the
3671 * system sets the REALTIME clock to 0 after boot.
3672 *
3673 * Use 64bit timespec on kernels that have it, this makes 32bit arch
3674 * y2038 compliant.
3675 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/*
	 * Sandwich the realtime clock read between two trace clock reads so
	 * their average approximates the trace clock value at the instant
	 * the realtime clock was sampled.
	 */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	/* Convert the realtime reading to trace clock ticks (freq tcf). */
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		/* Trace clock in nanoseconds: add nsec directly. */
		realtime += rts.tv_nsec;
	} else {
		/* Scale the nanosecond part into trace clock ticks. */
		uint64_t n = rts.tv_nsec * tcf;

		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	/* May be negative, e.g. if REALTIME was reset to 0 after boot. */
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
3713
3714static
3715int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3716{
3717 int ret = 0;
3718 size_t i;
3719 char cur;
3720
3721 i = 0;
3722 cur = string[i];
3723 while (cur != '\0') {
3724 switch (cur) {
3725 case '\n':
3726 ret = lttng_metadata_printf(session, "%s", "\\n");
3727 break;
3728 case '\\':
3729 case '"':
3730 ret = lttng_metadata_printf(session, "%c", '\\');
3731 if (ret)
3732 goto error;
3733 /* We still print the current char */
3734 /* Fallthrough */
3735 default:
3736 ret = lttng_metadata_printf(session, "%c", cur);
3737 break;
3738 }
3739
3740 if (ret)
3741 goto error;
3742
3743 cur = string[++i];
3744 }
3745error:
3746 return ret;
3747}
3748
/*
 * Emit an 'env'-section entry of the form `<tab>field = "value";` with
 * the value escaped for CTF string literals.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int err = lttng_metadata_printf(session, "	%s = \"", field);

	if (err)
		return err;
	err = print_escaped_ctf_string(session, field_value);
	if (err)
		return err;
	return lttng_metadata_printf(session, "\";\n");
}
3768
3769/*
3770 * Output metadata into this session's metadata buffers.
3771 * Must be called with sessions_mutex held.
3772 */
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 *
 * Emits, once per session: the basic typealiases, the trace/env/clock
 * declarations and the shared packet-context and event-header types;
 * then, on every call, the per-channel and per-event declarations that
 * have not been dumped yet (those helpers are themselves idempotent).
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	lttng_metadata_begin(session);

	/* Session-wide preamble is emitted only once. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Format the 16 raw UUID bytes as the canonical 36-char string. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/*
	 * 'env' section: tracer identity and host information. The hostname
	 * comes from current's UTS namespace to respect containers.
	 */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* Escaped because these values are user-provided strings. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	/* Clock UUID is optional: skip the attribute if unavailable. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped integer typealiases used by headers and contexts. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Channels must be declared before the events that reference them. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3963
3964/**
3965 * lttng_transport_register - LTT transport registration
3966 * @transport: transport structure
3967 *
3968 * Registers a transport which can be used as output to extract the data out of
3969 * LTTng. The module calling this registration function must ensure that no
3970 * trap-inducing code will be executed by the transport functions. E.g.
3971 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3972 * is made visible to the transport function. This registration acts as a
3973 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3974 * after its registration must it synchronize the TLBs.
3975 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* sessions_mutex also protects the transport list. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
3990
3991/**
3992 * lttng_transport_unregister - LTT transport unregistration
3993 * @transport: transport structure
3994 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Remove the transport under the same lock that guards registration. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4002
/*
 * Register a counter transport (backend for event-notifier error
 * counters). Same vmalloc-sync and locking rationale as
 * lttng_transport_register().
 */
void lttng_counter_transport_register(struct lttng_counter_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_counter_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4017
/* Unregister a counter transport; mirrors lttng_counter_transport_register(). */
void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4025
4026#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
4027
4028enum cpuhp_state lttng_hp_prepare;
4029enum cpuhp_state lttng_hp_online;
4030
/*
 * CPUHP "prepare" callback, run before a CPU comes online. Only the
 * ring-buffer backend allocates per-CPU resources at this stage; the
 * other registered components have nothing to prepare.
 */
static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return 0;
	case LTTNG_RING_BUFFER_BACKEND:
		return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
	case LTTNG_RING_BUFFER_ITER:
		return 0;
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return 0;
	default:
		return -EINVAL;
	}
}
4049
/*
 * CPUHP "dead" callback, run after a CPU goes fully offline. The
 * ring-buffer frontend and the perf-counter context release their
 * per-CPU state here; the remaining components have nothing to tear
 * down at this stage.
 */
static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
	case LTTNG_RING_BUFFER_BACKEND:
		return 0;
	case LTTNG_RING_BUFFER_ITER:
		return 0;
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
	default:
		return -EINVAL;
	}
}
4068
/*
 * CPUHP "online" callback, run once a CPU is up. Ring-buffer frontend,
 * iterator and perf-counter context all have per-CPU online work; the
 * backend was already handled in the prepare stage.
 */
static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
	case LTTNG_RING_BUFFER_BACKEND:
		return 0;
	case LTTNG_RING_BUFFER_ITER:
		return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
	default:
		return -EINVAL;
	}
}
4087
4088static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4089{
4090 struct lttng_cpuhp_node *lttng_node;
4091
4092 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4093 switch (lttng_node->component) {
4094 case LTTNG_RING_BUFFER_FRONTEND:
4095 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4096 case LTTNG_RING_BUFFER_BACKEND:
4097 return 0;
4098 case LTTNG_RING_BUFFER_ITER:
4099 return 0;
4100 case LTTNG_CONTEXT_PERF_COUNTERS:
4101 return 0;
4102 default:
4103 return -EINVAL;
4104 }
4105}
4106
/*
 * Register the multi-instance CPU hotplug states used by LTTng: a
 * prepare/dead pair and an online/offline pair. The dynamic state
 * numbers returned by the kernel are published to the ring-buffer
 * library via lttng_rb_set_hp_*().
 * Returns 0 on success, a negative errno on failure.
 */
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	/* On success, ret is the dynamically allocated state number. */
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Unwind the prepare state registered above. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
4133
/*
 * Remove the hotplug states in reverse order of registration, clearing
 * the state numbers published to the ring-buffer library first.
 */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
4141
4142#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* Pre-4.10 kernels: no cpuhp state machine, hotplug handling is a no-op. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
4150#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
4151
4152
/*
 * Module init: bring up every LTTng subsystem in dependency order
 * (wrappers, probes, contexts, tracepoints, kmem caches, ABI, logger,
 * CPU hotplug). On failure, unwind in exact reverse order via the
 * error label chain and log the failed version banner.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches for the per-event and per-event-notifier objects. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Unwind path: each label undoes the step initialized just above it. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}

module_init(lttng_events_init);
4244
/*
 * Module exit: tear down subsystems in reverse init order, destroy any
 * sessions still alive, then release the slab caches and log the
 * unload banner.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* _safe variant: lttng_session_destroy unlinks the session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}

module_exit(lttng_events_exit);
4277
/* Module metadata: license, authorship and the full version string. */
#include <generated/patches.h>
#ifdef LTTNG_EXTRA_VERSION_GIT
MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
#endif
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng tracer");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);
This page took 0.038202 seconds and 4 git commands to generate.