1 /* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/jhash.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng-kernel-version.h>
38 #include <lttng-events.h>
39 #include <lttng-tracer.h>
40 #include <lttng-abi-old.h>
41 #include <lttng-endian.h>
42 #include <lttng-string-utils.h>
43 #include <wrapper/vzalloc.h>
44 #include <wrapper/ringbuffer/backend.h>
45 #include <wrapper/ringbuffer/frontend.h>
46 #include <wrapper/time.h>
47
48 #define METADATA_CACHE_DEFAULT_SIZE 4096
49
50 static LIST_HEAD(sessions);
51 static LIST_HEAD(lttng_transport_list);
52 /*
53 * Protect the sessions and metadata caches.
54 */
55 static DEFINE_MUTEX(sessions_mutex);
56 static struct kmem_cache *event_cache;
57
58 static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
59 static void lttng_session_sync_enablers(struct lttng_session *session);
60 static void lttng_enabler_destroy(struct lttng_enabler *enabler);
61
62 static void _lttng_event_destroy(struct lttng_event *event);
63 static void _lttng_channel_destroy(struct lttng_channel *chan);
64 static int _lttng_event_unregister(struct lttng_event *event);
65 static
66 int _lttng_event_metadata_statedump(struct lttng_session *session,
67 struct lttng_channel *chan,
68 struct lttng_event *event);
69 static
70 int _lttng_session_metadata_statedump(struct lttng_session *session);
71 static
72 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
73 static
74 int _lttng_field_statedump(struct lttng_session *session,
75 const struct lttng_event_field *field,
76 size_t nesting);
77
78 void synchronize_trace(void)
79 {
80 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
81 synchronize_rcu();
82 #else
83 synchronize_sched();
84 #endif
85
86 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
87 #ifdef CONFIG_PREEMPT_RT_FULL
88 synchronize_rcu();
89 #endif
90 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
91 #ifdef CONFIG_PREEMPT_RT
92 synchronize_rcu();
93 #endif
94 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
95 }
96
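/*
 * Illustrative sketch (not upstream code): synchronize_trace() is the
 * grace-period barrier used throughout this file between unpublishing a
 * probe or an RCU-visible pointer and freeing the memory it still guards,
 * e.g. the pattern used by lttng_session_destroy():
 *
 *	ret = _lttng_event_unregister(event);	// stop new callers
 *	synchronize_trace();			// wait for in-flight probes
 *	_lttng_event_destroy(event);		// now safe to tear down
 */
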
97 void lttng_lock_sessions(void)
98 {
99 mutex_lock(&sessions_mutex);
100 }
101
102 void lttng_unlock_sessions(void)
103 {
104 mutex_unlock(&sessions_mutex);
105 }
106
107 /*
108 * Called with sessions lock held.
109 */
110 int lttng_session_active(void)
111 {
112 struct lttng_session *iter;
113
114 list_for_each_entry(iter, &sessions, list) {
115 if (iter->active)
116 return 1;
117 }
118 return 0;
119 }
120
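/*
 * Illustrative sketch (not upstream code): callers needing a consistent view
 * of the session list take the sessions lock around queries such as
 * lttng_session_active(), which expects the lock to be held:
 *
 *	lttng_lock_sessions();
 *	if (lttng_session_active())
 *		...;	// at least one session is currently tracing
 *	lttng_unlock_sessions();
 */
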
121 struct lttng_session *lttng_session_create(void)
122 {
123 struct lttng_session *session;
124 struct lttng_metadata_cache *metadata_cache;
125 int i;
126
127 mutex_lock(&sessions_mutex);
128 session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
129 if (!session)
130 goto err;
131 INIT_LIST_HEAD(&session->chan);
132 INIT_LIST_HEAD(&session->events);
133 uuid_le_gen(&session->uuid);
134
135 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
136 GFP_KERNEL);
137 if (!metadata_cache)
138 goto err_free_session;
139 metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
140 if (!metadata_cache->data)
141 goto err_free_cache;
142 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
143 kref_init(&metadata_cache->refcount);
144 mutex_init(&metadata_cache->lock);
145 session->metadata_cache = metadata_cache;
146 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
147 memcpy(&metadata_cache->uuid, &session->uuid,
148 sizeof(metadata_cache->uuid));
149 INIT_LIST_HEAD(&session->enablers_head);
150 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
151 INIT_HLIST_HEAD(&session->events_ht.table[i]);
152 list_add(&session->list, &sessions);
153 mutex_unlock(&sessions_mutex);
154 return session;
155
156 err_free_cache:
157 kfree(metadata_cache);
158 err_free_session:
159 lttng_kvfree(session);
160 err:
161 mutex_unlock(&sessions_mutex);
162 return NULL;
163 }
164
165 void metadata_cache_destroy(struct kref *kref)
166 {
167 struct lttng_metadata_cache *cache =
168 container_of(kref, struct lttng_metadata_cache, refcount);
169 vfree(cache->data);
170 kfree(cache);
171 }
172
173 void lttng_session_destroy(struct lttng_session *session)
174 {
175 struct lttng_channel *chan, *tmpchan;
176 struct lttng_event *event, *tmpevent;
177 struct lttng_metadata_stream *metadata_stream;
178 struct lttng_enabler *enabler, *tmpenabler;
179 int ret;
180
181 mutex_lock(&sessions_mutex);
182 WRITE_ONCE(session->active, 0);
183 list_for_each_entry(chan, &session->chan, list) {
184 ret = lttng_syscalls_unregister(chan);
185 WARN_ON(ret);
186 }
187 list_for_each_entry(event, &session->events, list) {
188 ret = _lttng_event_unregister(event);
189 WARN_ON(ret);
190 }
191 synchronize_trace(); /* Wait for in-flight events to complete */
192 list_for_each_entry_safe(enabler, tmpenabler,
193 &session->enablers_head, node)
194 lttng_enabler_destroy(enabler);
195 list_for_each_entry_safe(event, tmpevent, &session->events, list)
196 _lttng_event_destroy(event);
197 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
198 BUG_ON(chan->channel_type == METADATA_CHANNEL);
199 _lttng_channel_destroy(chan);
200 }
201 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
202 _lttng_metadata_channel_hangup(metadata_stream);
203 if (session->pid_tracker)
204 lttng_pid_tracker_destroy(session->pid_tracker);
205 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
206 list_del(&session->list);
207 mutex_unlock(&sessions_mutex);
208 lttng_kvfree(session);
209 }
210
211 int lttng_session_statedump(struct lttng_session *session)
212 {
213 int ret;
214
215 mutex_lock(&sessions_mutex);
216 ret = lttng_statedump_start(session);
217 mutex_unlock(&sessions_mutex);
218 return ret;
219 }
220
221 int lttng_session_enable(struct lttng_session *session)
222 {
223 int ret = 0;
224 struct lttng_channel *chan;
225
226 mutex_lock(&sessions_mutex);
227 if (session->active) {
228 ret = -EBUSY;
229 goto end;
230 }
231
232 /* Set transient enabler state to "enabled" */
233 session->tstate = 1;
234
235 /*
236 * Snapshot the number of events per channel to know the type of header
237 * we need to use.
238 */
239 list_for_each_entry(chan, &session->chan, list) {
240 if (chan->header_type)
241 continue; /* don't change it if session stop/restart */
242 if (chan->free_event_id < 31)
243 chan->header_type = 1; /* compact */
244 else
245 chan->header_type = 2; /* large */
246 }
247
248 /* We need to sync enablers with session before activation. */
249 lttng_session_sync_enablers(session);
250
251 /* Clear each stream's quiescent state. */
252 list_for_each_entry(chan, &session->chan, list) {
253 if (chan->channel_type != METADATA_CHANNEL)
254 lib_ring_buffer_clear_quiescent_channel(chan->chan);
255 }
256
257 WRITE_ONCE(session->active, 1);
258 WRITE_ONCE(session->been_active, 1);
259 ret = _lttng_session_metadata_statedump(session);
260 if (ret) {
261 WRITE_ONCE(session->active, 0);
262 goto end;
263 }
264 ret = lttng_statedump_start(session);
265 if (ret)
266 WRITE_ONCE(session->active, 0);
267 end:
268 mutex_unlock(&sessions_mutex);
269 return ret;
270 }
271
272 int lttng_session_disable(struct lttng_session *session)
273 {
274 int ret = 0;
275 struct lttng_channel *chan;
276
277 mutex_lock(&sessions_mutex);
278 if (!session->active) {
279 ret = -EBUSY;
280 goto end;
281 }
282 WRITE_ONCE(session->active, 0);
283
284 /* Set transient enabler state to "disabled" */
285 session->tstate = 0;
286 lttng_session_sync_enablers(session);
287
288 /* Set each stream's quiescent state. */
289 list_for_each_entry(chan, &session->chan, list) {
290 if (chan->channel_type != METADATA_CHANNEL)
291 lib_ring_buffer_set_quiescent_channel(chan->chan);
292 }
293 end:
294 mutex_unlock(&sessions_mutex);
295 return ret;
296 }
297
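/*
 * Illustrative sketch (not upstream code) of the session life cycle
 * implemented above: creation, activation (which snapshots the per-channel
 * header type, syncs enablers and dumps metadata), deactivation, and
 * destruction.
 *
 *	struct lttng_session *session = lttng_session_create();
 *
 *	if (session) {
 *		... create channels and events ...
 *		ret = lttng_session_enable(session);	// -EBUSY if already active
 *		...
 *		ret = lttng_session_disable(session);	// -EBUSY if not active
 *		lttng_session_destroy(session);
 *	}
 */
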
298 int lttng_session_metadata_regenerate(struct lttng_session *session)
299 {
300 int ret = 0;
301 struct lttng_channel *chan;
302 struct lttng_event *event;
303 struct lttng_metadata_cache *cache = session->metadata_cache;
304 struct lttng_metadata_stream *stream;
305
306 mutex_lock(&sessions_mutex);
307 if (!session->active) {
308 ret = -EBUSY;
309 goto end;
310 }
311
312 mutex_lock(&cache->lock);
313 memset(cache->data, 0, cache->cache_alloc);
314 cache->metadata_written = 0;
315 cache->version++;
316 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
317 stream->metadata_out = 0;
318 stream->metadata_in = 0;
319 }
320 mutex_unlock(&cache->lock);
321
322 session->metadata_dumped = 0;
323 list_for_each_entry(chan, &session->chan, list) {
324 chan->metadata_dumped = 0;
325 }
326
327 list_for_each_entry(event, &session->events, list) {
328 event->metadata_dumped = 0;
329 }
330
331 ret = _lttng_session_metadata_statedump(session);
332
333 end:
334 mutex_unlock(&sessions_mutex);
335 return ret;
336 }
337
338 int lttng_channel_enable(struct lttng_channel *channel)
339 {
340 int ret = 0;
341
342 mutex_lock(&sessions_mutex);
343 if (channel->channel_type == METADATA_CHANNEL) {
344 ret = -EPERM;
345 goto end;
346 }
347 if (channel->enabled) {
348 ret = -EEXIST;
349 goto end;
350 }
351 /* Set transient enabler state to "enabled" */
352 channel->tstate = 1;
353 lttng_session_sync_enablers(channel->session);
354 /* Set atomically the state to "enabled" */
355 WRITE_ONCE(channel->enabled, 1);
356 end:
357 mutex_unlock(&sessions_mutex);
358 return ret;
359 }
360
361 int lttng_channel_disable(struct lttng_channel *channel)
362 {
363 int ret = 0;
364
365 mutex_lock(&sessions_mutex);
366 if (channel->channel_type == METADATA_CHANNEL) {
367 ret = -EPERM;
368 goto end;
369 }
370 if (!channel->enabled) {
371 ret = -EEXIST;
372 goto end;
373 }
374 /* Set atomically the state to "disabled" */
375 WRITE_ONCE(channel->enabled, 0);
376 	/* Set transient enabler state to "disabled" */
377 channel->tstate = 0;
378 lttng_session_sync_enablers(channel->session);
379 end:
380 mutex_unlock(&sessions_mutex);
381 return ret;
382 }
383
384 int lttng_event_enable(struct lttng_event *event)
385 {
386 int ret = 0;
387
388 mutex_lock(&sessions_mutex);
389 if (event->chan->channel_type == METADATA_CHANNEL) {
390 ret = -EPERM;
391 goto end;
392 }
393 if (event->enabled) {
394 ret = -EEXIST;
395 goto end;
396 }
397 switch (event->instrumentation) {
398 case LTTNG_KERNEL_TRACEPOINT:
399 case LTTNG_KERNEL_SYSCALL:
400 ret = -EINVAL;
401 break;
402 case LTTNG_KERNEL_KPROBE:
403 case LTTNG_KERNEL_FUNCTION:
404 case LTTNG_KERNEL_UPROBE:
405 case LTTNG_KERNEL_NOOP:
406 WRITE_ONCE(event->enabled, 1);
407 break;
408 case LTTNG_KERNEL_KRETPROBE:
409 ret = lttng_kretprobes_event_enable_state(event, 1);
410 break;
411 default:
412 WARN_ON_ONCE(1);
413 ret = -EINVAL;
414 }
415 end:
416 mutex_unlock(&sessions_mutex);
417 return ret;
418 }
419
420 int lttng_event_disable(struct lttng_event *event)
421 {
422 int ret = 0;
423
424 mutex_lock(&sessions_mutex);
425 if (event->chan->channel_type == METADATA_CHANNEL) {
426 ret = -EPERM;
427 goto end;
428 }
429 if (!event->enabled) {
430 ret = -EEXIST;
431 goto end;
432 }
433 switch (event->instrumentation) {
434 case LTTNG_KERNEL_TRACEPOINT:
435 case LTTNG_KERNEL_SYSCALL:
436 ret = -EINVAL;
437 break;
438 case LTTNG_KERNEL_KPROBE:
439 case LTTNG_KERNEL_FUNCTION:
440 case LTTNG_KERNEL_UPROBE:
441 case LTTNG_KERNEL_NOOP:
442 WRITE_ONCE(event->enabled, 0);
443 break;
444 case LTTNG_KERNEL_KRETPROBE:
445 ret = lttng_kretprobes_event_enable_state(event, 0);
446 break;
447 default:
448 WARN_ON_ONCE(1);
449 ret = -EINVAL;
450 }
451 end:
452 mutex_unlock(&sessions_mutex);
453 return ret;
454 }
455
456 static struct lttng_transport *lttng_transport_find(const char *name)
457 {
458 struct lttng_transport *transport;
459
460 list_for_each_entry(transport, &lttng_transport_list, node) {
461 if (!strcmp(transport->name, name))
462 return transport;
463 }
464 return NULL;
465 }
466
467 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
468 const char *transport_name,
469 void *buf_addr,
470 size_t subbuf_size, size_t num_subbuf,
471 unsigned int switch_timer_interval,
472 unsigned int read_timer_interval,
473 enum channel_type channel_type)
474 {
475 struct lttng_channel *chan;
476 struct lttng_transport *transport = NULL;
477
478 mutex_lock(&sessions_mutex);
479 if (session->been_active && channel_type != METADATA_CHANNEL)
480 goto active; /* Refuse to add channel to active session */
481 transport = lttng_transport_find(transport_name);
482 if (!transport) {
483 printk(KERN_WARNING "LTTng transport %s not found\n",
484 transport_name);
485 goto notransport;
486 }
487 if (!try_module_get(transport->owner)) {
488 printk(KERN_WARNING "LTT : Can't lock transport module.\n");
489 goto notransport;
490 }
491 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
492 if (!chan)
493 goto nomem;
494 chan->session = session;
495 chan->id = session->free_chan_id++;
496 chan->ops = &transport->ops;
497 /*
498 * Note: the channel creation op already writes into the packet
499 * headers. Therefore the "chan" information used as input
500 * should be already accessible.
501 */
502 chan->chan = transport->ops.channel_create(transport_name,
503 chan, buf_addr, subbuf_size, num_subbuf,
504 switch_timer_interval, read_timer_interval);
505 if (!chan->chan)
506 goto create_error;
507 chan->tstate = 1;
508 chan->enabled = 1;
509 chan->transport = transport;
510 chan->channel_type = channel_type;
511 list_add(&chan->list, &session->chan);
512 mutex_unlock(&sessions_mutex);
513 return chan;
514
515 create_error:
516 kfree(chan);
517 nomem:
518 if (transport)
519 module_put(transport->owner);
520 notransport:
521 active:
522 mutex_unlock(&sessions_mutex);
523 return NULL;
524 }
525
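/*
 * Illustrative sketch (not upstream code): a hypothetical caller creating a
 * per-CPU channel. The transport name ("relay-discard") and the non-metadata
 * channel type constant (PER_CPU_CHANNEL) are assumptions for illustration;
 * they must match a transport registered on lttng_transport_list and the
 * enum channel_type definition. Creation is refused if the session has
 * already been active.
 *
 *	struct lttng_channel *chan;
 *
 *	chan = lttng_channel_create(session, "relay-discard", NULL,
 *			subbuf_size, num_subbuf,
 *			switch_timer_interval, read_timer_interval,
 *			PER_CPU_CHANNEL);
 *	if (!chan)
 *		... active session, unknown transport, or allocation failure ...
 */
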
526 /*
527 * Only used internally at session destruction for per-cpu channels, and
528 * when the metadata channel is released.
529 * Needs to be called with sessions mutex held.
530 */
531 static
532 void _lttng_channel_destroy(struct lttng_channel *chan)
533 {
534 chan->ops->channel_destroy(chan->chan);
535 module_put(chan->transport->owner);
536 list_del(&chan->list);
537 lttng_destroy_context(chan->ctx);
538 kfree(chan);
539 }
540
541 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
542 {
543 BUG_ON(chan->channel_type != METADATA_CHANNEL);
544
545 /* Protect the metadata cache with the sessions_mutex. */
546 mutex_lock(&sessions_mutex);
547 _lttng_channel_destroy(chan);
548 mutex_unlock(&sessions_mutex);
549 }
550 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
551
552 static
553 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
554 {
555 stream->finalized = 1;
556 wake_up_interruptible(&stream->read_wait);
557 }
558
559 /*
560 * Supports event creation while tracing session is active.
561 * Needs to be called with sessions mutex held.
562 */
563 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
564 struct lttng_kernel_event *event_param,
565 void *filter,
566 const struct lttng_event_desc *event_desc,
567 enum lttng_kernel_instrumentation itype)
568 {
569 struct lttng_session *session = chan->session;
570 struct lttng_event *event;
571 const char *event_name;
572 struct hlist_head *head;
573 size_t name_len;
574 uint32_t hash;
575 int ret;
576
577 if (chan->free_event_id == -1U) {
578 ret = -EMFILE;
579 goto full;
580 }
581
582 switch (itype) {
583 case LTTNG_KERNEL_TRACEPOINT:
584 event_name = event_desc->name;
585 break;
586 case LTTNG_KERNEL_KPROBE:
587 case LTTNG_KERNEL_UPROBE:
588 case LTTNG_KERNEL_KRETPROBE:
589 case LTTNG_KERNEL_FUNCTION:
590 case LTTNG_KERNEL_NOOP:
591 case LTTNG_KERNEL_SYSCALL:
592 event_name = event_param->name;
593 break;
594 default:
595 WARN_ON_ONCE(1);
596 ret = -EINVAL;
597 goto type_error;
598 }
599 name_len = strlen(event_name);
600 hash = jhash(event_name, name_len, 0);
601 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
602 lttng_hlist_for_each_entry(event, head, hlist) {
603 WARN_ON_ONCE(!event->desc);
604 if (!strncmp(event->desc->name, event_name,
605 LTTNG_KERNEL_SYM_NAME_LEN - 1)
606 && chan == event->chan) {
607 ret = -EEXIST;
608 goto exist;
609 }
610 }
611
612 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
613 if (!event) {
614 ret = -ENOMEM;
615 goto cache_error;
616 }
617 event->chan = chan;
618 event->filter = filter;
619 event->id = chan->free_event_id++;
620 event->instrumentation = itype;
621 event->evtype = LTTNG_TYPE_EVENT;
622 INIT_LIST_HEAD(&event->bytecode_runtime_head);
623 INIT_LIST_HEAD(&event->enablers_ref_head);
624
625 switch (itype) {
626 case LTTNG_KERNEL_TRACEPOINT:
627 /* Event will be enabled by enabler sync. */
628 event->enabled = 0;
629 event->registered = 0;
630 event->desc = lttng_event_get(event_name);
631 if (!event->desc) {
632 ret = -ENOENT;
633 goto register_error;
634 }
635 /* Populate lttng_event structure before event registration. */
636 smp_wmb();
637 break;
638 case LTTNG_KERNEL_KPROBE:
639 /*
640 * Needs to be explicitly enabled after creation, since
641 * we may want to apply filters.
642 */
643 event->enabled = 0;
644 event->registered = 1;
645 /*
646 * Populate lttng_event structure before event
647 * registration.
648 */
649 smp_wmb();
650 ret = lttng_kprobes_register(event_name,
651 event_param->u.kprobe.symbol_name,
652 event_param->u.kprobe.offset,
653 event_param->u.kprobe.addr,
654 event);
655 if (ret) {
656 ret = -EINVAL;
657 goto register_error;
658 }
659 ret = try_module_get(event->desc->owner);
660 WARN_ON_ONCE(!ret);
661 break;
662 case LTTNG_KERNEL_KRETPROBE:
663 {
664 struct lttng_event *event_return;
665
666 /* kretprobe defines 2 events */
667 /*
668 * Needs to be explicitly enabled after creation, since
669 * we may want to apply filters.
670 */
671 event->enabled = 0;
672 event->registered = 1;
673 event_return =
674 kmem_cache_zalloc(event_cache, GFP_KERNEL);
675 if (!event_return) {
676 ret = -ENOMEM;
677 goto register_error;
678 }
679 event_return->chan = chan;
680 event_return->filter = filter;
681 event_return->id = chan->free_event_id++;
682 event_return->enabled = 0;
683 event_return->registered = 1;
684 event_return->instrumentation = itype;
685 /*
686 * Populate lttng_event structure before kretprobe registration.
687 */
688 smp_wmb();
689 ret = lttng_kretprobes_register(event_name,
690 event_param->u.kretprobe.symbol_name,
691 event_param->u.kretprobe.offset,
692 event_param->u.kretprobe.addr,
693 event, event_return);
694 if (ret) {
695 kmem_cache_free(event_cache, event_return);
696 ret = -EINVAL;
697 goto register_error;
698 }
699 /* Take 2 refs on the module: one per event. */
700 ret = try_module_get(event->desc->owner);
701 WARN_ON_ONCE(!ret);
702 ret = try_module_get(event->desc->owner);
703 WARN_ON_ONCE(!ret);
704 ret = _lttng_event_metadata_statedump(chan->session, chan,
705 event_return);
706 WARN_ON_ONCE(ret > 0);
707 if (ret) {
708 kmem_cache_free(event_cache, event_return);
709 module_put(event->desc->owner);
710 module_put(event->desc->owner);
711 goto statedump_error;
712 }
713 list_add(&event_return->list, &chan->session->events);
714 break;
715 }
716 case LTTNG_KERNEL_FUNCTION:
717 /*
718 * Needs to be explicitly enabled after creation, since
719 * we may want to apply filters.
720 */
721 event->enabled = 0;
722 event->registered = 1;
723 /*
724 * Populate lttng_event structure before event
725 * registration.
726 */
727 smp_wmb();
728 ret = lttng_ftrace_register(event_name,
729 event_param->u.ftrace.symbol_name,
730 event);
731 if (ret) {
732 goto register_error;
733 }
734 ret = try_module_get(event->desc->owner);
735 WARN_ON_ONCE(!ret);
736 break;
737 case LTTNG_KERNEL_NOOP:
738 case LTTNG_KERNEL_SYSCALL:
739 /*
740 * Needs to be explicitly enabled after creation, since
741 * we may want to apply filters.
742 */
743 event->enabled = 0;
744 event->registered = 0;
745 event->desc = event_desc;
746 if (!event->desc) {
747 ret = -EINVAL;
748 goto register_error;
749 }
750 break;
751 case LTTNG_KERNEL_UPROBE:
752 /*
753 * Needs to be explicitly enabled after creation, since
754 * we may want to apply filters.
755 */
756 event->enabled = 0;
757 event->registered = 1;
758
759 /*
760 * Populate lttng_event structure before event
761 * registration.
762 */
763 smp_wmb();
764
765 ret = lttng_uprobes_register(event_param->name,
766 event_param->u.uprobe.fd,
767 event);
768 if (ret)
769 goto register_error;
770 ret = try_module_get(event->desc->owner);
771 WARN_ON_ONCE(!ret);
772 break;
773 default:
774 WARN_ON_ONCE(1);
775 ret = -EINVAL;
776 goto register_error;
777 }
778 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
779 WARN_ON_ONCE(ret > 0);
780 if (ret) {
781 goto statedump_error;
782 }
783 hlist_add_head(&event->hlist, head);
784 list_add(&event->list, &chan->session->events);
785 return event;
786
787 statedump_error:
788 /* If a statedump error occurs, events will not be readable. */
789 register_error:
790 kmem_cache_free(event_cache, event);
791 cache_error:
792 exist:
793 type_error:
794 full:
795 return ERR_PTR(ret);
796 }
797
798 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
799 struct lttng_kernel_event *event_param,
800 void *filter,
801 const struct lttng_event_desc *event_desc,
802 enum lttng_kernel_instrumentation itype)
803 {
804 struct lttng_event *event;
805
806 mutex_lock(&sessions_mutex);
807 event = _lttng_event_create(chan, event_param, filter, event_desc,
808 itype);
809 mutex_unlock(&sessions_mutex);
810 return event;
811 }
812
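/*
 * Illustrative sketch (not upstream code): creating a kprobe event through
 * the public wrapper. Only the fields consumed by _lttng_event_create() for
 * LTTNG_KERNEL_KPROBE are filled in; the probe and symbol names below are
 * hypothetical, and the event still has to be enabled explicitly afterwards
 * (see lttng_event_enable()).
 *
 *	struct lttng_kernel_event ev_param;
 *	struct lttng_event *event;
 *
 *	memset(&ev_param, 0, sizeof(ev_param));
 *	strncpy(ev_param.name, "my_kprobe_event",
 *			LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	strncpy(ev_param.u.kprobe.symbol_name, "do_sys_open",
 *			LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	ev_param.u.kprobe.offset = 0;
 *	ev_param.u.kprobe.addr = 0;
 *	event = lttng_event_create(chan, &ev_param, NULL, NULL,
 *			LTTNG_KERNEL_KPROBE);
 *	if (IS_ERR(event))
 *		... PTR_ERR(event) is -EEXIST, -ENOMEM, -EINVAL, ...
 */
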
813 /* Only used for tracepoints for now. */
814 static
815 void register_event(struct lttng_event *event)
816 {
817 const struct lttng_event_desc *desc;
818 int ret = -EINVAL;
819
820 if (event->registered)
821 return;
822
823 desc = event->desc;
824 switch (event->instrumentation) {
825 case LTTNG_KERNEL_TRACEPOINT:
826 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
827 desc->probe_callback,
828 event);
829 break;
830 case LTTNG_KERNEL_SYSCALL:
831 ret = lttng_syscall_filter_enable(event->chan,
832 desc->name);
833 break;
834 case LTTNG_KERNEL_KPROBE:
835 case LTTNG_KERNEL_UPROBE:
836 case LTTNG_KERNEL_KRETPROBE:
837 case LTTNG_KERNEL_FUNCTION:
838 case LTTNG_KERNEL_NOOP:
839 ret = 0;
840 break;
841 default:
842 WARN_ON_ONCE(1);
843 }
844 if (!ret)
845 event->registered = 1;
846 }
847
848 /*
849 * Only used internally at session destruction.
850 */
851 int _lttng_event_unregister(struct lttng_event *event)
852 {
853 const struct lttng_event_desc *desc;
854 int ret = -EINVAL;
855
856 if (!event->registered)
857 return 0;
858
859 desc = event->desc;
860 switch (event->instrumentation) {
861 case LTTNG_KERNEL_TRACEPOINT:
862 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
863 event->desc->probe_callback,
864 event);
865 break;
866 case LTTNG_KERNEL_KPROBE:
867 lttng_kprobes_unregister(event);
868 ret = 0;
869 break;
870 case LTTNG_KERNEL_KRETPROBE:
871 lttng_kretprobes_unregister(event);
872 ret = 0;
873 break;
874 case LTTNG_KERNEL_FUNCTION:
875 lttng_ftrace_unregister(event);
876 ret = 0;
877 break;
878 case LTTNG_KERNEL_SYSCALL:
879 ret = lttng_syscall_filter_disable(event->chan,
880 desc->name);
881 break;
882 case LTTNG_KERNEL_NOOP:
883 ret = 0;
884 break;
885 case LTTNG_KERNEL_UPROBE:
886 lttng_uprobes_unregister(event);
887 ret = 0;
888 break;
889 default:
890 WARN_ON_ONCE(1);
891 }
892 if (!ret)
893 event->registered = 0;
894 return ret;
895 }
896
897 /*
898 * Only used internally at session destruction.
899 */
900 static
901 void _lttng_event_destroy(struct lttng_event *event)
902 {
903 switch (event->instrumentation) {
904 case LTTNG_KERNEL_TRACEPOINT:
905 lttng_event_put(event->desc);
906 break;
907 case LTTNG_KERNEL_KPROBE:
908 module_put(event->desc->owner);
909 lttng_kprobes_destroy_private(event);
910 break;
911 case LTTNG_KERNEL_KRETPROBE:
912 module_put(event->desc->owner);
913 lttng_kretprobes_destroy_private(event);
914 break;
915 case LTTNG_KERNEL_FUNCTION:
916 module_put(event->desc->owner);
917 lttng_ftrace_destroy_private(event);
918 break;
919 case LTTNG_KERNEL_NOOP:
920 case LTTNG_KERNEL_SYSCALL:
921 break;
922 case LTTNG_KERNEL_UPROBE:
923 module_put(event->desc->owner);
924 lttng_uprobes_destroy_private(event);
925 break;
926 default:
927 WARN_ON_ONCE(1);
928 }
929 list_del(&event->list);
930 lttng_destroy_context(event->ctx);
931 kmem_cache_free(event_cache, event);
932 }
933
934 int lttng_session_track_pid(struct lttng_session *session, int pid)
935 {
936 int ret;
937
938 if (pid < -1)
939 return -EINVAL;
940 mutex_lock(&sessions_mutex);
941 if (pid == -1) {
942 /* track all pids: destroy tracker. */
943 if (session->pid_tracker) {
944 struct lttng_pid_tracker *lpf;
945
946 lpf = session->pid_tracker;
947 rcu_assign_pointer(session->pid_tracker, NULL);
948 synchronize_trace();
949 lttng_pid_tracker_destroy(lpf);
950 }
951 ret = 0;
952 } else {
953 if (!session->pid_tracker) {
954 struct lttng_pid_tracker *lpf;
955
956 lpf = lttng_pid_tracker_create();
957 if (!lpf) {
958 ret = -ENOMEM;
959 goto unlock;
960 }
961 ret = lttng_pid_tracker_add(lpf, pid);
962 rcu_assign_pointer(session->pid_tracker, lpf);
963 } else {
964 ret = lttng_pid_tracker_add(session->pid_tracker, pid);
965 }
966 }
967 unlock:
968 mutex_unlock(&sessions_mutex);
969 return ret;
970 }
971
972 int lttng_session_untrack_pid(struct lttng_session *session, int pid)
973 {
974 int ret;
975
976 if (pid < -1)
977 return -EINVAL;
978 mutex_lock(&sessions_mutex);
979 if (pid == -1) {
980 /* untrack all pids: replace by empty tracker. */
981 struct lttng_pid_tracker *old_lpf = session->pid_tracker;
982 struct lttng_pid_tracker *lpf;
983
984 lpf = lttng_pid_tracker_create();
985 if (!lpf) {
986 ret = -ENOMEM;
987 goto unlock;
988 }
989 rcu_assign_pointer(session->pid_tracker, lpf);
990 synchronize_trace();
991 if (old_lpf)
992 lttng_pid_tracker_destroy(old_lpf);
993 ret = 0;
994 } else {
995 if (!session->pid_tracker) {
996 ret = -ENOENT;
997 goto unlock;
998 }
999 ret = lttng_pid_tracker_del(session->pid_tracker, pid);
1000 }
1001 unlock:
1002 mutex_unlock(&sessions_mutex);
1003 return ret;
1004 }
1005
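/*
 * Illustrative sketch (not upstream code) of the PID tracker semantics
 * implemented above: pid == -1 is the wildcard that tracks (or untracks)
 * everything, any other value toggles a single PID.
 *
 *	lttng_session_track_pid(session, 1234);		// track only PID 1234
 *	lttng_session_track_pid(session, -1);		// destroy tracker: track all
 *	lttng_session_untrack_pid(session, -1);		// empty tracker: track none
 */
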
1006 static
1007 void *pid_list_start(struct seq_file *m, loff_t *pos)
1008 {
1009 struct lttng_session *session = m->private;
1010 struct lttng_pid_tracker *lpf;
1011 struct lttng_pid_hash_node *e;
1012 int iter = 0, i;
1013
1014 mutex_lock(&sessions_mutex);
1015 lpf = session->pid_tracker;
1016 if (lpf) {
1017 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
1018 struct hlist_head *head = &lpf->pid_hash[i];
1019
1020 lttng_hlist_for_each_entry(e, head, hlist) {
1021 if (iter++ >= *pos)
1022 return e;
1023 }
1024 }
1025 } else {
1026 /* PID tracker disabled. */
1027 if (iter >= *pos && iter == 0) {
1028 return session; /* empty tracker */
1029 }
1030 iter++;
1031 }
1032 /* End of list */
1033 return NULL;
1034 }
1035
1036 /* Called with sessions_mutex held. */
1037 static
1038 void *pid_list_next(struct seq_file *m, void *p, loff_t *ppos)
1039 {
1040 struct lttng_session *session = m->private;
1041 struct lttng_pid_tracker *lpf;
1042 struct lttng_pid_hash_node *e;
1043 int iter = 0, i;
1044
1045 (*ppos)++;
1046 lpf = session->pid_tracker;
1047 if (lpf) {
1048 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
1049 struct hlist_head *head = &lpf->pid_hash[i];
1050
1051 lttng_hlist_for_each_entry(e, head, hlist) {
1052 if (iter++ >= *ppos)
1053 return e;
1054 }
1055 }
1056 } else {
1057 /* PID tracker disabled. */
1058 if (iter >= *ppos && iter == 0)
1059 return session; /* empty tracker */
1060 iter++;
1061 }
1062
1063 /* End of list */
1064 return NULL;
1065 }
1066
1067 static
1068 void pid_list_stop(struct seq_file *m, void *p)
1069 {
1070 mutex_unlock(&sessions_mutex);
1071 }
1072
1073 static
1074 int pid_list_show(struct seq_file *m, void *p)
1075 {
1076 int pid;
1077
1078 if (p == m->private) {
1079 /* Tracker disabled. */
1080 pid = -1;
1081 } else {
1082 const struct lttng_pid_hash_node *e = p;
1083
1084 pid = lttng_pid_tracker_get_node_pid(e);
1085 }
1086 seq_printf(m, "process { pid = %d; };\n", pid);
1087 return 0;
1088 }
1089
1090 static
1091 const struct seq_operations lttng_tracker_pids_list_seq_ops = {
1092 .start = pid_list_start,
1093 .next = pid_list_next,
1094 .stop = pid_list_stop,
1095 .show = pid_list_show,
1096 };
1097
1098 static
1099 int lttng_tracker_pids_list_open(struct inode *inode, struct file *file)
1100 {
1101 return seq_open(file, &lttng_tracker_pids_list_seq_ops);
1102 }
1103
1104 static
1105 int lttng_tracker_pids_list_release(struct inode *inode, struct file *file)
1106 {
1107 struct seq_file *m = file->private_data;
1108 struct lttng_session *session = m->private;
1109 int ret;
1110
1111 WARN_ON_ONCE(!session);
1112 ret = seq_release(inode, file);
1113 if (!ret && session)
1114 fput(session->file);
1115 return ret;
1116 }
1117
1118 const struct file_operations lttng_tracker_pids_list_fops = {
1119 .owner = THIS_MODULE,
1120 .open = lttng_tracker_pids_list_open,
1121 .read = seq_read,
1122 .llseek = seq_lseek,
1123 .release = lttng_tracker_pids_list_release,
1124 };
1125
1126 int lttng_session_list_tracker_pids(struct lttng_session *session)
1127 {
1128 struct file *tracker_pids_list_file;
1129 struct seq_file *m;
1130 int file_fd, ret;
1131
1132 file_fd = lttng_get_unused_fd();
1133 if (file_fd < 0) {
1134 ret = file_fd;
1135 goto fd_error;
1136 }
1137
1138 tracker_pids_list_file = anon_inode_getfile("[lttng_tracker_pids_list]",
1139 &lttng_tracker_pids_list_fops,
1140 NULL, O_RDWR);
1141 if (IS_ERR(tracker_pids_list_file)) {
1142 ret = PTR_ERR(tracker_pids_list_file);
1143 goto file_error;
1144 }
1145 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1146 ret = -EOVERFLOW;
1147 goto refcount_error;
1148 }
1149 ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
1150 if (ret < 0)
1151 goto open_error;
1152 m = tracker_pids_list_file->private_data;
1153 m->private = session;
1154 fd_install(file_fd, tracker_pids_list_file);
1155
1156 return file_fd;
1157
1158 open_error:
1159 atomic_long_dec(&session->file->f_count);
1160 refcount_error:
1161 fput(tracker_pids_list_file);
1162 file_error:
1163 put_unused_fd(file_fd);
1164 fd_error:
1165 return ret;
1166 }
1167
1168 /*
1169 * Enabler management.
1170 */
1171 static
1172 int lttng_match_enabler_star_glob(const char *desc_name,
1173 const char *pattern)
1174 {
1175 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1176 desc_name, LTTNG_SIZE_MAX))
1177 return 0;
1178 return 1;
1179 }
1180
1181 static
1182 int lttng_match_enabler_name(const char *desc_name,
1183 const char *name)
1184 {
1185 if (strcmp(desc_name, name))
1186 return 0;
1187 return 1;
1188 }
1189
1190 static
1191 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1192 struct lttng_enabler *enabler)
1193 {
1194 const char *desc_name, *enabler_name;
1195
1196 enabler_name = enabler->event_param.name;
1197 switch (enabler->event_param.instrumentation) {
1198 case LTTNG_KERNEL_TRACEPOINT:
1199 desc_name = desc->name;
1200 break;
1201 case LTTNG_KERNEL_SYSCALL:
1202 desc_name = desc->name;
1203 if (!strncmp(desc_name, "compat_", strlen("compat_")))
1204 desc_name += strlen("compat_");
1205 if (!strncmp(desc_name, "syscall_exit_",
1206 strlen("syscall_exit_"))) {
1207 desc_name += strlen("syscall_exit_");
1208 } else if (!strncmp(desc_name, "syscall_entry_",
1209 strlen("syscall_entry_"))) {
1210 desc_name += strlen("syscall_entry_");
1211 } else {
1212 WARN_ON_ONCE(1);
1213 return -EINVAL;
1214 }
1215 break;
1216 default:
1217 WARN_ON_ONCE(1);
1218 return -EINVAL;
1219 }
1220 switch (enabler->type) {
1221 case LTTNG_ENABLER_STAR_GLOB:
1222 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1223 case LTTNG_ENABLER_NAME:
1224 return lttng_match_enabler_name(desc_name, enabler_name);
1225 default:
1226 return -EINVAL;
1227 }
1228 }
1229
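/*
 * Illustrative example (not upstream code) of what lttng_desc_match_enabler()
 * accepts: for syscalls, the "compat_" and "syscall_entry_"/"syscall_exit_"
 * prefixes of the probe description are stripped before comparison, so a
 * star-glob enabler with pattern "open*" matches the descriptions
 * "syscall_entry_openat" and "compat_syscall_exit_open", while an
 * LTTNG_ENABLER_NAME enabler only matches on strict string equality.
 */
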
1230 static
1231 int lttng_event_match_enabler(struct lttng_event *event,
1232 struct lttng_enabler *enabler)
1233 {
1234 if (enabler->event_param.instrumentation != event->instrumentation)
1235 return 0;
1236 if (lttng_desc_match_enabler(event->desc, enabler)
1237 && event->chan == enabler->chan)
1238 return 1;
1239 else
1240 return 0;
1241 }
1242
1243 static
1244 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1245 struct lttng_enabler *enabler)
1246 {
1247 struct lttng_enabler_ref *enabler_ref;
1248
1249 list_for_each_entry(enabler_ref,
1250 &event->enablers_ref_head, node) {
1251 if (enabler_ref->ref == enabler)
1252 return enabler_ref;
1253 }
1254 return NULL;
1255 }
1256
1257 static
1258 void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
1259 {
1260 struct lttng_session *session = enabler->chan->session;
1261 struct lttng_probe_desc *probe_desc;
1262 const struct lttng_event_desc *desc;
1263 int i;
1264 struct list_head *probe_list;
1265
1266 probe_list = lttng_get_probe_list_head();
1267 /*
1268 * For each probe event, if we find that a probe event matches
1269 * our enabler, create an associated lttng_event if not
1270 * already present.
1271 */
1272 list_for_each_entry(probe_desc, probe_list, head) {
1273 for (i = 0; i < probe_desc->nr_events; i++) {
1274 int found = 0;
1275 struct hlist_head *head;
1276 const char *event_name;
1277 size_t name_len;
1278 uint32_t hash;
1279 struct lttng_event *event;
1280
1281 desc = probe_desc->event_desc[i];
1282 if (!lttng_desc_match_enabler(desc, enabler))
1283 continue;
1284 event_name = desc->name;
1285 name_len = strlen(event_name);
1286
1287 /*
1288 * Check if already created.
1289 */
1290 hash = jhash(event_name, name_len, 0);
1291 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
1292 lttng_hlist_for_each_entry(event, head, hlist) {
1293 if (event->desc == desc
1294 && event->chan == enabler->chan)
1295 found = 1;
1296 }
1297 if (found)
1298 continue;
1299
1300 /*
1301 * We need to create an event for this
1302 * event probe.
1303 */
1304 event = _lttng_event_create(enabler->chan,
1305 NULL, NULL, desc,
1306 LTTNG_KERNEL_TRACEPOINT);
1307 if (!event) {
1308 printk(KERN_INFO "Unable to create event %s\n",
1309 probe_desc->event_desc[i]->name);
1310 }
1311 }
1312 }
1313 }
1314
1315 static
1316 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1317 {
1318 int ret;
1319
1320 ret = lttng_syscalls_register(enabler->chan, NULL);
1321 WARN_ON_ONCE(ret);
1322 }
1323
1324 /*
1325 * Create struct lttng_event if it is missing and present in the list of
1326 * tracepoint probes.
1327 * Should be called with sessions mutex held.
1328 */
1329 static
1330 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1331 {
1332 switch (enabler->event_param.instrumentation) {
1333 case LTTNG_KERNEL_TRACEPOINT:
1334 lttng_create_tracepoint_if_missing(enabler);
1335 break;
1336 case LTTNG_KERNEL_SYSCALL:
1337 lttng_create_syscall_if_missing(enabler);
1338 break;
1339 default:
1340 WARN_ON_ONCE(1);
1341 break;
1342 }
1343 }
1344
1345 /*
1346 * Create events associated with an enabler (if not already present),
1347 * and add backward reference from the event to the enabler.
1348 * Should be called with sessions mutex held.
1349 */
1350 static
1351 int lttng_enabler_ref_events(struct lttng_enabler *enabler)
1352 {
1353 struct lttng_session *session = enabler->chan->session;
1354 struct lttng_event *event;
1355
1356 /* First ensure that probe events are created for this enabler. */
1357 lttng_create_event_if_missing(enabler);
1358
1359 /* For each event matching enabler in session event list. */
1360 list_for_each_entry(event, &session->events, list) {
1361 struct lttng_enabler_ref *enabler_ref;
1362
1363 if (!lttng_event_match_enabler(event, enabler))
1364 continue;
1365 enabler_ref = lttng_event_enabler_ref(event, enabler);
1366 if (!enabler_ref) {
1367 /*
1368 * If no backward ref, create it.
1369 * Add backward ref from event to enabler.
1370 */
1371 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1372 if (!enabler_ref)
1373 return -ENOMEM;
1374 enabler_ref->ref = enabler;
1375 list_add(&enabler_ref->node,
1376 &event->enablers_ref_head);
1377 }
1378
1379 /*
1380 * Link filter bytecodes if not linked yet.
1381 */
1382 lttng_enabler_event_link_bytecode(event, enabler);
1383
1384 /* TODO: merge event context. */
1385 }
1386 return 0;
1387 }
1388
1389 /*
1390 * Called at module load: connect the probe on all enablers matching
1391 * this event.
1392 * Called with sessions lock held.
1393 */
1394 int lttng_fix_pending_events(void)
1395 {
1396 struct lttng_session *session;
1397
1398 list_for_each_entry(session, &sessions, list)
1399 lttng_session_lazy_sync_enablers(session);
1400 return 0;
1401 }
1402
1403 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1404 struct lttng_kernel_event *event_param,
1405 struct lttng_channel *chan)
1406 {
1407 struct lttng_enabler *enabler;
1408
1409 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1410 if (!enabler)
1411 return NULL;
1412 enabler->type = type;
1413 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1414 memcpy(&enabler->event_param, event_param,
1415 sizeof(enabler->event_param));
1416 enabler->chan = chan;
1417 /* ctx left NULL */
1418 enabler->enabled = 0;
1419 enabler->evtype = LTTNG_TYPE_ENABLER;
1420 mutex_lock(&sessions_mutex);
1421 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1422 lttng_session_lazy_sync_enablers(enabler->chan->session);
1423 mutex_unlock(&sessions_mutex);
1424 return enabler;
1425 }
1426
1427 int lttng_enabler_enable(struct lttng_enabler *enabler)
1428 {
1429 mutex_lock(&sessions_mutex);
1430 enabler->enabled = 1;
1431 lttng_session_lazy_sync_enablers(enabler->chan->session);
1432 mutex_unlock(&sessions_mutex);
1433 return 0;
1434 }
1435
1436 int lttng_enabler_disable(struct lttng_enabler *enabler)
1437 {
1438 mutex_lock(&sessions_mutex);
1439 enabler->enabled = 0;
1440 lttng_session_lazy_sync_enablers(enabler->chan->session);
1441 mutex_unlock(&sessions_mutex);
1442 return 0;
1443 }
1444
1445 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1446 struct lttng_kernel_filter_bytecode __user *bytecode)
1447 {
1448 struct lttng_filter_bytecode_node *bytecode_node;
1449 uint32_t bytecode_len;
1450 int ret;
1451
1452 ret = get_user(bytecode_len, &bytecode->len);
1453 if (ret)
1454 return ret;
1455 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1456 GFP_KERNEL);
1457 if (!bytecode_node)
1458 return -ENOMEM;
1459 ret = copy_from_user(&bytecode_node->bc, bytecode,
1460 sizeof(*bytecode) + bytecode_len);
1461 if (ret)
1462 goto error_free;
1463 bytecode_node->enabler = enabler;
1464 /* Enforce length based on allocated size */
1465 bytecode_node->bc.len = bytecode_len;
1466 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1467 lttng_session_lazy_sync_enablers(enabler->chan->session);
1468 return 0;
1469
1470 error_free:
1471 kfree(bytecode_node);
1472 return ret;
1473 }
1474
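/*
 * Illustrative sketch (not upstream code) of the enabler life cycle exposed
 * above: create an enabler on a channel, optionally attach filter bytecode
 * received from user space (the "ubytecode" pointer below is hypothetical),
 * then flip its enabled state; each step lazily re-syncs the owning session.
 *
 *	struct lttng_enabler *enabler;
 *
 *	enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
 *			&event_param, chan);	// e.g. event_param.name = "sched_*"
 *	if (enabler) {
 *		ret = lttng_enabler_attach_bytecode(enabler, ubytecode);
 *		ret = lttng_enabler_enable(enabler);
 *		...
 *		ret = lttng_enabler_disable(enabler);
 *	}
 */
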
1475 int lttng_event_add_callsite(struct lttng_event *event,
1476 struct lttng_kernel_event_callsite __user *callsite)
1477 {
1478
1479 switch (event->instrumentation) {
1480 case LTTNG_KERNEL_UPROBE:
1481 return lttng_uprobes_add_callsite(event, callsite);
1482 default:
1483 return -EINVAL;
1484 }
1485 }
1486
1487 int lttng_enabler_attach_context(struct lttng_enabler *enabler,
1488 struct lttng_kernel_context *context_param)
1489 {
1490 return -ENOSYS;
1491 }
1492
1493 static
1494 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1495 {
1496 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1497
1498 /* Destroy filter bytecode */
1499 list_for_each_entry_safe(filter_node, tmp_filter_node,
1500 &enabler->filter_bytecode_head, node) {
1501 kfree(filter_node);
1502 }
1503
1504 /* Destroy contexts */
1505 lttng_destroy_context(enabler->ctx);
1506
1507 list_del(&enabler->node);
1508 kfree(enabler);
1509 }
1510
1511 /*
1512 * lttng_session_sync_enablers should be called just before starting a
1513 * session.
1514 * Should be called with sessions mutex held.
1515 */
1516 static
1517 void lttng_session_sync_enablers(struct lttng_session *session)
1518 {
1519 struct lttng_enabler *enabler;
1520 struct lttng_event *event;
1521
1522 list_for_each_entry(enabler, &session->enablers_head, node)
1523 lttng_enabler_ref_events(enabler);
1524 /*
1525 * For each event, if at least one of its enablers is enabled,
1526 * and its channel and session transient states are enabled, we
1527 * enable the event, else we disable it.
1528 */
1529 list_for_each_entry(event, &session->events, list) {
1530 struct lttng_enabler_ref *enabler_ref;
1531 struct lttng_bytecode_runtime *runtime;
1532 int enabled = 0, has_enablers_without_bytecode = 0;
1533
1534 switch (event->instrumentation) {
1535 case LTTNG_KERNEL_TRACEPOINT:
1536 case LTTNG_KERNEL_SYSCALL:
1537 /* Enable events */
1538 list_for_each_entry(enabler_ref,
1539 &event->enablers_ref_head, node) {
1540 if (enabler_ref->ref->enabled) {
1541 enabled = 1;
1542 break;
1543 }
1544 }
1545 break;
1546 default:
1547 /* Not handled with lazy sync. */
1548 continue;
1549 }
1550 /*
1551 * Enabled state is based on the union of enablers, with the
1552 * intersection of session and channel transient enable
1553 * states.
1554 */
1555 enabled = enabled && session->tstate && event->chan->tstate;
1556
1557 WRITE_ONCE(event->enabled, enabled);
1558 /*
1559 * Sync tracepoint registration with event enabled
1560 * state.
1561 */
1562 if (enabled) {
1563 register_event(event);
1564 } else {
1565 _lttng_event_unregister(event);
1566 }
1567
1568 /* Check if has enablers without bytecode enabled */
1569 list_for_each_entry(enabler_ref,
1570 &event->enablers_ref_head, node) {
1571 if (enabler_ref->ref->enabled
1572 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1573 has_enablers_without_bytecode = 1;
1574 break;
1575 }
1576 }
1577 event->has_enablers_without_bytecode =
1578 has_enablers_without_bytecode;
1579
1580 /* Enable filters */
1581 list_for_each_entry(runtime,
1582 &event->bytecode_runtime_head, node)
1583 lttng_filter_sync_state(runtime);
1584 }
1585 }
1586
1587 /*
1588 * Apply enablers to session events, adding events to session if need
1589 * be. It is required after each modification applied to an active
1590 * session, and right before session "start".
1591 * "lazy" sync means we only sync if required.
1592 * Should be called with sessions mutex held.
1593 */
1594 static
1595 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1596 {
1597 /* We can skip if session is not active */
1598 if (!session->active)
1599 return;
1600 lttng_session_sync_enablers(session);
1601 }
1602
1603 /*
1604 * Serialize at most one packet worth of metadata into a metadata
1605 * channel.
1606 * We grab the metadata cache mutex to get exclusive access to our metadata
1607 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1608 * allows us to do racy operations such as checking the space remaining in the
1609 * packet and writing to it, since mutual exclusion protects us from concurrent
1610 * writes. Mutual exclusion on the metadata cache allows us to read the cache content
1611 * without racing against reallocation of the cache by updates.
1612 * Returns the number of bytes written in the channel, 0 if no data
1613 * was written and a negative value on error.
1614 */
1615 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
1616 struct channel *chan)
1617 {
1618 struct lib_ring_buffer_ctx ctx;
1619 int ret = 0;
1620 size_t len, reserve_len;
1621
1622 /*
1623 * Ensure we support multiple get_next / put sequences followed by
1624 * put_next. The metadata cache lock protects reading the metadata
1625 * cache. It can indeed be read concurrently by "get_next_subbuf" and
1626 * "flush" operations on the buffer invoked by different processes.
1627 * Moreover, since the metadata cache memory can be reallocated, we
1628 * need to have exclusive access against updates even though we only
1629 * read it.
1630 */
1631 mutex_lock(&stream->metadata_cache->lock);
1632 WARN_ON(stream->metadata_in < stream->metadata_out);
1633 if (stream->metadata_in != stream->metadata_out)
1634 goto end;
1635
1636 /* Metadata regenerated, change the version. */
1637 if (stream->metadata_cache->version != stream->version)
1638 stream->version = stream->metadata_cache->version;
1639
1640 len = stream->metadata_cache->metadata_written -
1641 stream->metadata_in;
1642 if (!len)
1643 goto end;
1644 reserve_len = min_t(size_t,
1645 stream->transport->ops.packet_avail_size(chan),
1646 len);
1647 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
1648 sizeof(char), -1);
1649 /*
1650 * If reservation failed, return an error to the caller.
1651 */
1652 ret = stream->transport->ops.event_reserve(&ctx, 0);
1653 if (ret != 0) {
1654 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
1655 goto end;
1656 }
1657 stream->transport->ops.event_write(&ctx,
1658 stream->metadata_cache->data + stream->metadata_in,
1659 reserve_len);
1660 stream->transport->ops.event_commit(&ctx);
1661 stream->metadata_in += reserve_len;
1662 ret = reserve_len;
1663
1664 end:
1665 mutex_unlock(&stream->metadata_cache->lock);
1666 return ret;
1667 }
1668
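/*
 * Illustrative sketch (not upstream code): a consumer-side loop draining the
 * metadata cache into the metadata ring-buffer channel, one packet worth at a
 * time, based on the return convention documented above (bytes written, 0
 * when nothing is left, negative on error).
 *
 *	for (;;) {
 *		ret = lttng_metadata_output_channel(stream, chan);
 *		if (ret < 0)
 *			... reservation or transport error ...
 *		if (ret <= 0)
 *			break;	// cache fully pushed to the channel
 *	}
 */
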
1669 /*
1670 * Write the metadata to the metadata cache.
1671 * Must be called with sessions_mutex held.
1672 * The metadata cache lock protects us from concurrent read access from
1673 * thread outputting metadata content to ring buffer.
1674 */
1675 int lttng_metadata_printf(struct lttng_session *session,
1676 const char *fmt, ...)
1677 {
1678 char *str;
1679 size_t len;
1680 va_list ap;
1681 struct lttng_metadata_stream *stream;
1682
1683 WARN_ON_ONCE(!READ_ONCE(session->active));
1684
1685 va_start(ap, fmt);
1686 str = kvasprintf(GFP_KERNEL, fmt, ap);
1687 va_end(ap);
1688 if (!str)
1689 return -ENOMEM;
1690
1691 len = strlen(str);
1692 mutex_lock(&session->metadata_cache->lock);
1693 if (session->metadata_cache->metadata_written + len >
1694 session->metadata_cache->cache_alloc) {
1695 char *tmp_cache_realloc;
1696 unsigned int tmp_cache_alloc_size;
1697
1698 tmp_cache_alloc_size = max_t(unsigned int,
1699 session->metadata_cache->cache_alloc + len,
1700 session->metadata_cache->cache_alloc << 1);
1701 tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
1702 if (!tmp_cache_realloc)
1703 goto err;
1704 if (session->metadata_cache->data) {
1705 memcpy(tmp_cache_realloc,
1706 session->metadata_cache->data,
1707 session->metadata_cache->cache_alloc);
1708 vfree(session->metadata_cache->data);
1709 }
1710
1711 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
1712 session->metadata_cache->data = tmp_cache_realloc;
1713 }
1714 memcpy(session->metadata_cache->data +
1715 session->metadata_cache->metadata_written,
1716 str, len);
1717 session->metadata_cache->metadata_written += len;
1718 mutex_unlock(&session->metadata_cache->lock);
1719 kfree(str);
1720
1721 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
1722 wake_up_interruptible(&stream->read_wait);
1723
1724 return 0;
1725
1726 err:
1727 mutex_unlock(&session->metadata_cache->lock);
1728 kfree(str);
1729 return -ENOMEM;
1730 }
1731
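/*
 * Illustrative note (not upstream code): the statedump helpers below emit
 * TSDL metadata fragments through this printf-like interface, e.g.:
 *
 *	ret = lttng_metadata_printf(session, "} _%s;\n", field->name);
 *	if (ret)
 *		return ret;
 *
 * Each successful call appends to the metadata cache (growing it if needed)
 * and wakes up any metadata stream reader blocked on read_wait.
 */
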
1732 static
1733 int print_tabs(struct lttng_session *session, size_t nesting)
1734 {
1735 size_t i;
1736
1737 for (i = 0; i < nesting; i++) {
1738 int ret;
1739
1740 ret = lttng_metadata_printf(session, " ");
1741 if (ret) {
1742 return ret;
1743 }
1744 }
1745 return 0;
1746 }
1747
1748 /*
1749 * Must be called with sessions_mutex held.
1750 */
1751 static
1752 int _lttng_struct_type_statedump(struct lttng_session *session,
1753 const struct lttng_type *type,
1754 size_t nesting)
1755 {
1756 int ret;
1757 uint32_t i, nr_fields;
1758
1759 ret = print_tabs(session, nesting);
1760 if (ret)
1761 return ret;
1762 ret = lttng_metadata_printf(session,
1763 "struct {\n");
1764 if (ret)
1765 return ret;
1766 nr_fields = type->u._struct.nr_fields;
1767 for (i = 0; i < nr_fields; i++) {
1768 const struct lttng_event_field *iter_field;
1769
1770 iter_field = &type->u._struct.fields[i];
1771 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1772 if (ret)
1773 return ret;
1774 }
1775 ret = print_tabs(session, nesting);
1776 if (ret)
1777 return ret;
1778 ret = lttng_metadata_printf(session,
1779 "}");
1780 return ret;
1781 }
1782
1783 /*
1784 * Must be called with sessions_mutex held.
1785 */
1786 static
1787 int _lttng_struct_statedump(struct lttng_session *session,
1788 const struct lttng_event_field *field,
1789 size_t nesting)
1790 {
1791 int ret;
1792
1793 ret = _lttng_struct_type_statedump(session,
1794 &field->type, nesting);
1795 if (ret)
1796 return ret;
1797 ret = lttng_metadata_printf(session,
1798 "_%s;\n",
1799 field->name);
1800 return ret;
1801 }
1802
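/*
 * Illustrative example (not upstream code) of the TSDL text the two helpers
 * above produce for a structure field named "ctx" containing a single
 * hypothetical 32-bit signed integer member "pid" (indentation comes from
 * print_tabs(), field names are prefixed with '_'):
 *
 *	struct {
 *		integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; } _pid;
 *	}_ctx;
 */
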
1803 /*
1804 * Must be called with sessions_mutex held.
1805 */
1806 static
1807 int _lttng_variant_type_statedump(struct lttng_session *session,
1808 const struct lttng_type *type,
1809 size_t nesting)
1810 {
1811 int ret;
1812 uint32_t i, nr_choices;
1813
1814 ret = print_tabs(session, nesting);
1815 if (ret)
1816 return ret;
1817 ret = lttng_metadata_printf(session,
1818 "variant <_%s> {\n",
1819 type->u.variant.tag_name);
1820 if (ret)
1821 return ret;
1822 nr_choices = type->u.variant.nr_choices;
1823 for (i = 0; i < nr_choices; i++) {
1824 const struct lttng_event_field *iter_field;
1825
1826 iter_field = &type->u.variant.choices[i];
1827 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1828 if (ret)
1829 return ret;
1830 }
1831 ret = print_tabs(session, nesting);
1832 if (ret)
1833 return ret;
1834 ret = lttng_metadata_printf(session,
1835 "}");
1836 return ret;
1837 }
1838
1839 /*
1840 * Must be called with sessions_mutex held.
1841 */
1842 static
1843 int _lttng_variant_statedump(struct lttng_session *session,
1844 const struct lttng_event_field *field,
1845 size_t nesting)
1846 {
1847 int ret;
1848
1849 ret = _lttng_variant_type_statedump(session,
1850 &field->type, nesting);
1851 if (ret)
1852 return ret;
1853 ret = lttng_metadata_printf(session,
1854 "_%s;\n",
1855 field->name);
1856 return ret;
1857 }
1858
1859 /*
1860 * Must be called with sessions_mutex held.
1861 */
1862 static
1863 int _lttng_array_compound_statedump(struct lttng_session *session,
1864 const struct lttng_event_field *field,
1865 size_t nesting)
1866 {
1867 int ret;
1868 const struct lttng_type *elem_type;
1869
1870 	/* Only arrays of structures and variants are currently supported. */
1871 elem_type = field->type.u.array_compound.elem_type;
1872 switch (elem_type->atype) {
1873 case atype_struct:
1874 ret = _lttng_struct_type_statedump(session, elem_type, nesting);
1875 if (ret)
1876 return ret;
1877 break;
1878 case atype_variant:
1879 ret = _lttng_variant_type_statedump(session, elem_type, nesting);
1880 if (ret)
1881 return ret;
1882 break;
1883 default:
1884 return -EINVAL;
1885 }
1886 ret = lttng_metadata_printf(session,
1887 " _%s[%u];\n",
1888 field->name,
1889 field->type.u.array_compound.length);
1890 return ret;
1891 }
1892
1893 /*
1894 * Must be called with sessions_mutex held.
1895 */
1896 static
1897 int _lttng_sequence_compound_statedump(struct lttng_session *session,
1898 const struct lttng_event_field *field,
1899 size_t nesting)
1900 {
1901 int ret;
1902 const char *length_name;
1903 const struct lttng_type *elem_type;
1904
1905 length_name = field->type.u.sequence_compound.length_name;
1906
1907 	/* Only arrays of structures and variants are currently supported. */
1908 elem_type = field->type.u.sequence_compound.elem_type;
1909 switch (elem_type->atype) {
1910 case atype_struct:
1911 ret = _lttng_struct_type_statedump(session, elem_type, nesting);
1912 if (ret)
1913 return ret;
1914 break;
1915 case atype_variant:
1916 ret = _lttng_variant_type_statedump(session, elem_type, nesting);
1917 if (ret)
1918 return ret;
1919 break;
1920 default:
1921 return -EINVAL;
1922 }
1923 ret = lttng_metadata_printf(session,
1924 " _%s[ _%s ];\n",
1925 field->name,
1926 length_name);
1927 return ret;
1928 }
1929
1930 /*
1931 * Must be called with sessions_mutex held.
1932 */
1933 static
1934 int _lttng_enum_statedump(struct lttng_session *session,
1935 const struct lttng_event_field *field,
1936 size_t nesting)
1937 {
1938 const struct lttng_enum_desc *enum_desc;
1939 const struct lttng_integer_type *container_type;
1940 int ret;
1941 unsigned int i, nr_entries;
1942
1943 enum_desc = field->type.u.basic.enumeration.desc;
1944 container_type = &field->type.u.basic.enumeration.container_type;
1945 nr_entries = enum_desc->nr_entries;
1946
1947 ret = print_tabs(session, nesting);
1948 if (ret)
1949 goto end;
1950 ret = lttng_metadata_printf(session,
1951 "enum : integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } {\n",
1952 container_type->size,
1953 container_type->alignment,
1954 container_type->signedness,
1955 (container_type->encoding == lttng_encode_none)
1956 ? "none"
1957 : (container_type->encoding == lttng_encode_UTF8)
1958 ? "UTF8"
1959 : "ASCII",
1960 container_type->base,
1961 #if __BYTE_ORDER == __BIG_ENDIAN
1962 container_type->reverse_byte_order ? " byte_order = le;" : ""
1963 #else
1964 container_type->reverse_byte_order ? " byte_order = be;" : ""
1965 #endif
1966 );
1967 if (ret)
1968 goto end;
1969 /* Dump all entries */
1970 for (i = 0; i < nr_entries; i++) {
1971 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
1972 int j, len;
1973
1974 ret = print_tabs(session, nesting + 1);
1975 if (ret)
1976 goto end;
1977 ret = lttng_metadata_printf(session,
1978 "\"");
1979 if (ret)
1980 goto end;
1981 len = strlen(entry->string);
1982 /* Escape the character '"' */
1983 for (j = 0; j < len; j++) {
1984 char c = entry->string[j];
1985
1986 switch (c) {
1987 case '"':
1988 ret = lttng_metadata_printf(session,
1989 "\\\"");
1990 break;
1991 case '\\':
1992 ret = lttng_metadata_printf(session,
1993 "\\\\");
1994 break;
1995 default:
1996 ret = lttng_metadata_printf(session,
1997 "%c", c);
1998 break;
1999 }
2000 if (ret)
2001 goto end;
2002 }
2003 ret = lttng_metadata_printf(session, "\"");
2004 if (ret)
2005 goto end;
2006
2007 if (entry->options.is_auto) {
2008 ret = lttng_metadata_printf(session, ",\n");
2009 if (ret)
2010 goto end;
2011 } else {
2012 ret = lttng_metadata_printf(session,
2013 " = ");
2014 if (ret)
2015 goto end;
2016 if (entry->start.signedness)
2017 ret = lttng_metadata_printf(session,
2018 "%lld", (long long) entry->start.value);
2019 else
2020 ret = lttng_metadata_printf(session,
2021 "%llu", entry->start.value);
2022 if (ret)
2023 goto end;
2024 if (entry->start.signedness == entry->end.signedness &&
2025 entry->start.value
2026 == entry->end.value) {
2027 ret = lttng_metadata_printf(session,
2028 ",\n");
2029 } else {
2030 if (entry->end.signedness) {
2031 ret = lttng_metadata_printf(session,
2032 " ... %lld,\n",
2033 (long long) entry->end.value);
2034 } else {
2035 ret = lttng_metadata_printf(session,
2036 " ... %llu,\n",
2037 entry->end.value);
2038 }
2039 }
2040 if (ret)
2041 goto end;
2042 }
2043 }
2044 ret = print_tabs(session, nesting);
2045 if (ret)
2046 goto end;
2047 ret = lttng_metadata_printf(session, "} _%s;\n",
2048 field->name);
2049 end:
2050 return ret;
2051 }
2052
2053 /*
2054 * Must be called with sessions_mutex held.
2055 */
2056 static
2057 int _lttng_field_statedump(struct lttng_session *session,
2058 const struct lttng_event_field *field,
2059 size_t nesting)
2060 {
2061 int ret = 0;
2062
2063 switch (field->type.atype) {
2064 case atype_integer:
2065 ret = print_tabs(session, nesting);
2066 if (ret)
2067 return ret;
2068 ret = lttng_metadata_printf(session,
2069 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
2070 field->type.u.basic.integer.size,
2071 field->type.u.basic.integer.alignment,
2072 field->type.u.basic.integer.signedness,
2073 (field->type.u.basic.integer.encoding == lttng_encode_none)
2074 ? "none"
2075 : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
2076 ? "UTF8"
2077 : "ASCII",
2078 field->type.u.basic.integer.base,
2079 #if __BYTE_ORDER == __BIG_ENDIAN
2080 field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2081 #else
2082 field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2083 #endif
2084 field->name);
2085 break;
2086 case atype_enum:
2087 ret = _lttng_enum_statedump(session, field, nesting);
2088 break;
2089 case atype_array:
2090 case atype_array_bitfield:
2091 {
2092 const struct lttng_basic_type *elem_type;
2093
2094 elem_type = &field->type.u.array.elem_type;
2095 if (field->type.u.array.elem_alignment) {
2096 ret = print_tabs(session, nesting);
2097 if (ret)
2098 return ret;
2099 ret = lttng_metadata_printf(session,
2100 "struct { } align(%u) _%s_padding;\n",
2101 field->type.u.array.elem_alignment * CHAR_BIT,
2102 field->name);
2103 if (ret)
2104 return ret;
2105 }
2106 ret = print_tabs(session, nesting);
2107 if (ret)
2108 return ret;
2109 ret = lttng_metadata_printf(session,
2110 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
2111 elem_type->u.basic.integer.size,
2112 elem_type->u.basic.integer.alignment,
2113 elem_type->u.basic.integer.signedness,
2114 (elem_type->u.basic.integer.encoding == lttng_encode_none)
2115 ? "none"
2116 : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
2117 ? "UTF8"
2118 : "ASCII",
2119 elem_type->u.basic.integer.base,
2120 #if __BYTE_ORDER == __BIG_ENDIAN
2121 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2122 #else
2123 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2124 #endif
2125 field->name, field->type.u.array.length);
2126 break;
2127 }
2128 case atype_sequence:
2129 case atype_sequence_bitfield:
2130 {
2131 const struct lttng_basic_type *elem_type;
2132 const struct lttng_basic_type *length_type;
2133
2134 elem_type = &field->type.u.sequence.elem_type;
2135 length_type = &field->type.u.sequence.length_type;
2136 ret = print_tabs(session, nesting);
2137 if (ret)
2138 return ret;
2139 ret = lttng_metadata_printf(session,
2140 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
2141 length_type->u.basic.integer.size,
2142 (unsigned int) length_type->u.basic.integer.alignment,
2143 length_type->u.basic.integer.signedness,
2144 (length_type->u.basic.integer.encoding == lttng_encode_none)
2145 ? "none"
2146 : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
2147 ? "UTF8"
2148 : "ASCII"),
2149 length_type->u.basic.integer.base,
2150 #if __BYTE_ORDER == __BIG_ENDIAN
2151 length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2152 #else
2153 length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2154 #endif
2155 field->name);
2156 if (ret)
2157 return ret;
2158
2159 if (field->type.u.sequence.elem_alignment) {
2160 ret = print_tabs(session, nesting);
2161 if (ret)
2162 return ret;
2163 ret = lttng_metadata_printf(session,
2164 "struct { } align(%u) _%s_padding;\n",
2165 field->type.u.sequence.elem_alignment * CHAR_BIT,
2166 field->name);
2167 if (ret)
2168 return ret;
2169 }
2170 ret = print_tabs(session, nesting);
2171 if (ret)
2172 return ret;
2173 ret = lttng_metadata_printf(session,
2174 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
2175 elem_type->u.basic.integer.size,
2176 (unsigned int) elem_type->u.basic.integer.alignment,
2177 elem_type->u.basic.integer.signedness,
2178 (elem_type->u.basic.integer.encoding == lttng_encode_none)
2179 ? "none"
2180 : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
2181 ? "UTF8"
2182 : "ASCII"),
2183 elem_type->u.basic.integer.base,
2184 #if __BYTE_ORDER == __BIG_ENDIAN
2185 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2186 #else
2187 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2188 #endif
2189 field->name,
2190 field->name);
2191 break;
2192 }
2193
2194 case atype_string:
2195 /* Default encoding is UTF8 */
2196 ret = print_tabs(session, nesting);
2197 if (ret)
2198 return ret;
2199 ret = lttng_metadata_printf(session,
2200 "string%s _%s;\n",
2201 field->type.u.basic.string.encoding == lttng_encode_ASCII ?
2202 " { encoding = ASCII; }" : "",
2203 field->name);
2204 break;
2205 case atype_struct:
2206 ret = _lttng_struct_statedump(session, field, nesting);
2207 break;
2208 case atype_array_compound:
2209 ret = _lttng_array_compound_statedump(session, field, nesting);
2210 break;
2211 case atype_sequence_compound:
2212 ret = _lttng_sequence_compound_statedump(session, field, nesting);
2213 break;
2214 case atype_variant:
2215 ret = _lttng_variant_statedump(session, field, nesting);
2216 break;
2217
2218 default:
2219 WARN_ON_ONCE(1);
2220 return -EINVAL;
2221 }
2222 return ret;
2223 }
2224
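/*
 * Illustrative output only: for a hypothetical 32-bit unsigned integer field
 * named "cpu_id", the atype_integer case above emits a declaration along the
 * lines of:
 *
 *	integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; } _cpu_id;
 *
 * Field names are prefixed with '_' in the metadata, and the byte_order
 * attribute is only added when the stored byte order differs from the native
 * one (reverse_byte_order set).
 */
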
2225 static
2226 int _lttng_context_metadata_statedump(struct lttng_session *session,
2227 struct lttng_ctx *ctx)
2228 {
2229 int ret = 0;
2230 int i;
2231
2232 if (!ctx)
2233 return 0;
2234 for (i = 0; i < ctx->nr_fields; i++) {
2235 const struct lttng_ctx_field *field = &ctx->fields[i];
2236
2237 ret = _lttng_field_statedump(session, &field->event_field, 2);
2238 if (ret)
2239 return ret;
2240 }
2241 return ret;
2242 }
2243
2244 static
2245 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2246 struct lttng_event *event)
2247 {
2248 const struct lttng_event_desc *desc = event->desc;
2249 int ret = 0;
2250 int i;
2251
2252 for (i = 0; i < desc->nr_fields; i++) {
2253 const struct lttng_event_field *field = &desc->fields[i];
2254
2255 ret = _lttng_field_statedump(session, field, 2);
2256 if (ret)
2257 return ret;
2258 }
2259 return ret;
2260 }
2261
2262 /*
2263 * Must be called with sessions_mutex held.
2264 */
2265 static
2266 int _lttng_event_metadata_statedump(struct lttng_session *session,
2267 struct lttng_channel *chan,
2268 struct lttng_event *event)
2269 {
2270 int ret = 0;
2271
2272 if (event->metadata_dumped || !READ_ONCE(session->active))
2273 return 0;
2274 if (chan->channel_type == METADATA_CHANNEL)
2275 return 0;
2276
2277 ret = lttng_metadata_printf(session,
2278 "event {\n"
2279 " name = \"%s\";\n"
2280 " id = %u;\n"
2281 " stream_id = %u;\n",
2282 event->desc->name,
2283 event->id,
2284 event->chan->id);
2285 if (ret)
2286 goto end;
2287
2288 if (event->ctx) {
2289 ret = lttng_metadata_printf(session,
2290 " context := struct {\n");
2291 if (ret)
2292 goto end;
2293 }
2294 ret = _lttng_context_metadata_statedump(session, event->ctx);
2295 if (ret)
2296 goto end;
2297 if (event->ctx) {
2298 ret = lttng_metadata_printf(session,
2299 " };\n");
2300 if (ret)
2301 goto end;
2302 }
2303
2304 ret = lttng_metadata_printf(session,
2305 " fields := struct {\n"
2306 );
2307 if (ret)
2308 goto end;
2309
2310 ret = _lttng_fields_metadata_statedump(session, event);
2311 if (ret)
2312 goto end;
2313
2314 /*
2315 * LTTng space reservation can only reserve multiples of the
2316 * byte size.
2317 */
2318 ret = lttng_metadata_printf(session,
2319 " };\n"
2320 "};\n\n");
2321 if (ret)
2322 goto end;
2323
2324 event->metadata_dumped = 1;
2325 end:
2326 return ret;
2327
2328 }
2329
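/*
 * Sketch of the block emitted above for one event (names and ids are
 * hypothetical):
 *
 *	event {
 *		name = "sched_switch";
 *		id = 0;
 *		stream_id = 0;
 *		fields := struct {
 *			... one declaration per event field ...
 *		};
 *	};
 *
 * The "context := struct { ... };" section is only present when the event
 * has context fields attached.
 */
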
2330 /*
2331 * Must be called with sessions_mutex held.
2332 */
2333 static
2334 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2335 struct lttng_channel *chan)
2336 {
2337 int ret = 0;
2338
2339 if (chan->metadata_dumped || !READ_ONCE(session->active))
2340 return 0;
2341
2342 if (chan->channel_type == METADATA_CHANNEL)
2343 return 0;
2344
2345 WARN_ON_ONCE(!chan->header_type);
2346 ret = lttng_metadata_printf(session,
2347 "stream {\n"
2348 " id = %u;\n"
2349 " event.header := %s;\n"
2350 " packet.context := struct packet_context;\n",
2351 chan->id,
2352 chan->header_type == 1 ? "struct event_header_compact" :
2353 "struct event_header_large");
2354 if (ret)
2355 goto end;
2356
2357 if (chan->ctx) {
2358 ret = lttng_metadata_printf(session,
2359 " event.context := struct {\n");
2360 if (ret)
2361 goto end;
2362 }
2363 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2364 if (ret)
2365 goto end;
2366 if (chan->ctx) {
2367 ret = lttng_metadata_printf(session,
2368 " };\n");
2369 if (ret)
2370 goto end;
2371 }
2372
2373 ret = lttng_metadata_printf(session,
2374 "};\n\n");
if (ret)
goto end;
2375
2376 chan->metadata_dumped = 1;
2377 end:
2378 return ret;
2379 }
2380
2381 /*
2382 * Must be called with sessions_mutex held.
2383 */
2384 static
2385 int _lttng_stream_packet_context_declare(struct lttng_session *session)
2386 {
2387 return lttng_metadata_printf(session,
2388 "struct packet_context {\n"
2389 " uint64_clock_monotonic_t timestamp_begin;\n"
2390 " uint64_clock_monotonic_t timestamp_end;\n"
2391 " uint64_t content_size;\n"
2392 " uint64_t packet_size;\n"
2393 " uint64_t packet_seq_num;\n"
2394 " unsigned long events_discarded;\n"
2395 " uint32_t cpu_id;\n"
2396 "};\n\n"
2397 );
2398 }
2399
2400 /*
2401 * Compact header:
2402 * id: range: 0 - 30.
2403 * id 31 is reserved to indicate an extended header.
2404 *
2405 * Large header:
2406 * id: range: 0 - 65534.
2407 * id 65535 is reserved to indicate an extended header.
2408 *
2409 * Must be called with sessions_mutex held.
2410 */
2411 static
2412 int _lttng_event_header_declare(struct lttng_session *session)
2413 {
2414 return lttng_metadata_printf(session,
2415 "struct event_header_compact {\n"
2416 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
2417 " variant <id> {\n"
2418 " struct {\n"
2419 " uint27_clock_monotonic_t timestamp;\n"
2420 " } compact;\n"
2421 " struct {\n"
2422 " uint32_t id;\n"
2423 " uint64_clock_monotonic_t timestamp;\n"
2424 " } extended;\n"
2425 " } v;\n"
2426 "} align(%u);\n"
2427 "\n"
2428 "struct event_header_large {\n"
2429 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
2430 " variant <id> {\n"
2431 " struct {\n"
2432 " uint32_clock_monotonic_t timestamp;\n"
2433 " } compact;\n"
2434 " struct {\n"
2435 " uint32_t id;\n"
2436 " uint64_clock_monotonic_t timestamp;\n"
2437 " } extended;\n"
2438 " } v;\n"
2439 "} align(%u);\n\n",
2440 lttng_alignof(uint32_t) * CHAR_BIT,
2441 lttng_alignof(uint16_t) * CHAR_BIT
2442 );
2443 }
2444
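/*
 * Worked example for the header layouts above (not taken from a real trace):
 * with the compact header, events with ids 0 through 30 store the id in the
 * 5-bit enum together with a 27-bit timestamp; an event with id 31 or above
 * stores the reserved value 31 and uses the extended variant, i.e. a full
 * 32-bit id followed by a 64-bit timestamp. The large header follows the
 * same scheme with a 16-bit id field and 65535 as the extended marker.
 */
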
2445 /*
2446 * Approximation of the NTP time-of-day to clock monotonic correlation,
2447 * taken at the start of the trace.
2448 * Yes, this is only an approximation. Yes, we can (and will) do better
2449 * in future versions.
2450 * This function may return a negative offset; this can happen if the
2451 * system sets the REALTIME clock to 0 after boot.
2452 *
2453 * Use a 64-bit timespec on kernels that provide it; this makes 32-bit
2454 * architectures y2038 compliant.
2455 */
2456 static
2457 int64_t measure_clock_offset(void)
2458 {
2459 uint64_t monotonic_avg, monotonic[2], realtime;
2460 uint64_t tcf = trace_clock_freq();
2461 int64_t offset;
2462 unsigned long flags;
2463 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2464 struct timespec64 rts = { 0, 0 };
2465 #else
2466 struct timespec rts = { 0, 0 };
2467 #endif
2468
2469 /* Disable interrupts to increase correlation precision. */
2470 local_irq_save(flags);
2471 monotonic[0] = trace_clock_read64();
2472 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2473 ktime_get_real_ts64(&rts);
2474 #else
2475 getnstimeofday(&rts);
2476 #endif
2477 monotonic[1] = trace_clock_read64();
2478 local_irq_restore(flags);
2479
2480 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2481 realtime = (uint64_t) rts.tv_sec * tcf;
2482 if (tcf == NSEC_PER_SEC) {
2483 realtime += rts.tv_nsec;
2484 } else {
2485 uint64_t n = rts.tv_nsec * tcf;
2486
2487 do_div(n, NSEC_PER_SEC);
2488 realtime += n;
2489 }
2490 offset = (int64_t) realtime - monotonic_avg;
2491 return offset;
2492 }
2493
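/*
 * Worked example with hypothetical numbers: for freq = 1000000000 Hz and
 * offset = 1600000000000000000, a raw clock value of 5000000000 read from a
 * trace corresponds to
 *
 *	(offset + 5000000000) * (1 / freq) = 1600000005 seconds since the Epoch,
 *
 * which is how viewers are expected to combine the "offset" and "freq"
 * attributes emitted in the clock metadata below.
 */
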
2494 static
2495 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
2496 {
2497 int ret = 0;	/* stays 0 if the string is empty and the loop below never runs */
2498 size_t i;
2499 char cur;
2500
2501 i = 0;
2502 cur = string[i];
2503 while (cur != '\0') {
2504 switch (cur) {
2505 case '\n':
2506 ret = lttng_metadata_printf(session, "%s", "\\n");
2507 break;
2508 case '\\':
2509 case '"':
2510 ret = lttng_metadata_printf(session, "%c", '\\');
2511 if (ret)
2512 goto error;
2513 /* We still print the current char */
2514 /* Fallthrough */
2515 default:
2516 ret = lttng_metadata_printf(session, "%c", cur);
2517 break;
2518 }
2519
2520 if (ret)
2521 goto error;
2522
2523 cur = string[++i];
2524 }
2525 error:
2526 return ret;
2527 }
2528
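/*
 * Example (illustrative input): the string
 *	say "hi"
 * is emitted as
 *	say \"hi\"
 * Likewise, an input backslash is doubled and a newline becomes the
 * two-character sequence \n, so the value can sit inside a double-quoted
 * CTF metadata string.
 */
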
2529 static
2530 int print_metadata_escaped_field(struct lttng_session *session, const char *field,
2531 const char *field_value)
2532 {
2533 int ret;
2534
2535 ret = lttng_metadata_printf(session, " %s = \"", field);
2536 if (ret)
2537 goto error;
2538
2539 ret = print_escaped_ctf_string(session, field_value);
2540 if (ret)
2541 goto error;
2542
2543 ret = lttng_metadata_printf(session, "\";\n");
2544
2545 error:
2546 return ret;
2547 }
2548
2549 /*
2550 * Output metadata into this session's metadata buffers.
2551 * Must be called with sessions_mutex held.
2552 */
2553 static
2554 int _lttng_session_metadata_statedump(struct lttng_session *session)
2555 {
2556 unsigned char *uuid_c = session->uuid.b;
2557 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
2558 struct lttng_channel *chan;
2559 struct lttng_event *event;
2560 int ret = 0;
2561
2562 if (!READ_ONCE(session->active))
2563 return 0;
2564 if (session->metadata_dumped)
2565 goto skip_session;
2566
2567 snprintf(uuid_s, sizeof(uuid_s),
2568 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
2569 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
2570 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
2571 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
2572 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
2573
2574 ret = lttng_metadata_printf(session,
2575 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
2576 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
2577 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
2578 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
2579 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
2580 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
2581 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
2582 "\n"
2583 "trace {\n"
2584 " major = %u;\n"
2585 " minor = %u;\n"
2586 " uuid = \"%s\";\n"
2587 " byte_order = %s;\n"
2588 " packet.header := struct {\n"
2589 " uint32_t magic;\n"
2590 " uint8_t uuid[16];\n"
2591 " uint32_t stream_id;\n"
2592 " uint64_t stream_instance_id;\n"
2593 " };\n"
2594 "};\n\n",
2595 lttng_alignof(uint8_t) * CHAR_BIT,
2596 lttng_alignof(uint16_t) * CHAR_BIT,
2597 lttng_alignof(uint32_t) * CHAR_BIT,
2598 lttng_alignof(uint64_t) * CHAR_BIT,
2599 sizeof(unsigned long) * CHAR_BIT,
2600 lttng_alignof(unsigned long) * CHAR_BIT,
2601 CTF_SPEC_MAJOR,
2602 CTF_SPEC_MINOR,
2603 uuid_s,
2604 #if __BYTE_ORDER == __BIG_ENDIAN
2605 "be"
2606 #else
2607 "le"
2608 #endif
2609 );
2610 if (ret)
2611 goto end;
2612
2613 ret = lttng_metadata_printf(session,
2614 "env {\n"
2615 " hostname = \"%s\";\n"
2616 " domain = \"kernel\";\n"
2617 " sysname = \"%s\";\n"
2618 " kernel_release = \"%s\";\n"
2619 " kernel_version = \"%s\";\n"
2620 " tracer_name = \"lttng-modules\";\n"
2621 " tracer_major = %d;\n"
2622 " tracer_minor = %d;\n"
2623 " tracer_patchlevel = %d;\n"
2624 " trace_buffering_scheme = \"global\";\n",
2625 current->nsproxy->uts_ns->name.nodename,
2626 utsname()->sysname,
2627 utsname()->release,
2628 utsname()->version,
2629 LTTNG_MODULES_MAJOR_VERSION,
2630 LTTNG_MODULES_MINOR_VERSION,
2631 LTTNG_MODULES_PATCHLEVEL_VERSION
2632 );
2633 if (ret)
2634 goto end;
2635
2636 ret = print_metadata_escaped_field(session, "trace_name", session->name);
2637 if (ret)
2638 goto end;
2639 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
2640 session->creation_time);
2641 if (ret)
2642 goto end;
2643
2644 /* Close env */
2645 ret = lttng_metadata_printf(session, "};\n\n");
2646 if (ret)
2647 goto end;
2648
2649 ret = lttng_metadata_printf(session,
2650 "clock {\n"
2651 " name = \"%s\";\n",
2652 trace_clock_name()
2653 );
2654 if (ret)
2655 goto end;
2656
2657 if (!trace_clock_uuid(clock_uuid_s)) {
2658 ret = lttng_metadata_printf(session,
2659 " uuid = \"%s\";\n",
2660 clock_uuid_s
2661 );
2662 if (ret)
2663 goto end;
2664 }
2665
2666 ret = lttng_metadata_printf(session,
2667 " description = \"%s\";\n"
2668 " freq = %llu; /* Frequency, in Hz */\n"
2669 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
2670 " offset = %lld;\n"
2671 "};\n\n",
2672 trace_clock_description(),
2673 (unsigned long long) trace_clock_freq(),
2674 (long long) measure_clock_offset()
2675 );
2676 if (ret)
2677 goto end;
2678
2679 ret = lttng_metadata_printf(session,
2680 "typealias integer {\n"
2681 " size = 27; align = 1; signed = false;\n"
2682 " map = clock.%s.value;\n"
2683 "} := uint27_clock_monotonic_t;\n"
2684 "\n"
2685 "typealias integer {\n"
2686 " size = 32; align = %u; signed = false;\n"
2687 " map = clock.%s.value;\n"
2688 "} := uint32_clock_monotonic_t;\n"
2689 "\n"
2690 "typealias integer {\n"
2691 " size = 64; align = %u; signed = false;\n"
2692 " map = clock.%s.value;\n"
2693 "} := uint64_clock_monotonic_t;\n\n",
2694 trace_clock_name(),
2695 lttng_alignof(uint32_t) * CHAR_BIT,
2696 trace_clock_name(),
2697 lttng_alignof(uint64_t) * CHAR_BIT,
2698 trace_clock_name()
2699 );
2700 if (ret)
2701 goto end;
2702
2703 ret = _lttng_stream_packet_context_declare(session);
2704 if (ret)
2705 goto end;
2706
2707 ret = _lttng_event_header_declare(session);
2708 if (ret)
2709 goto end;
2710
2711 skip_session:
2712 list_for_each_entry(chan, &session->chan, list) {
2713 ret = _lttng_channel_metadata_statedump(session, chan);
2714 if (ret)
2715 goto end;
2716 }
2717
2718 list_for_each_entry(event, &session->events, list) {
2719 ret = _lttng_event_metadata_statedump(session, event->chan, event);
2720 if (ret)
2721 goto end;
2722 }
2723 session->metadata_dumped = 1;
2724 end:
2725 return ret;
2726 }
2727
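/*
 * Note on incremental dumping: the metadata_dumped flags make this function
 * append-only. The first call writes the session-wide preamble (type
 * aliases, trace, env, clock, packet context and event header declarations);
 * subsequent calls jump to skip_session and only emit the channels and
 * events added since the previous dump.
 */
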
2728 /**
2729 * lttng_transport_register - LTT transport registration
2730 * @transport: transport structure
2731 *
2732 * Registers a transport which can be used as an output to extract data out of
2733 * LTTng. The module calling this registration function must ensure that no
2734 * trap-inducing code will be executed by the transport functions. E.g.
2735 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
2736 * is made visible to the transport functions. This registration acts as a
2737 * vmalloc_sync_all; therefore, a module only needs to synchronize the TLBs
2738 * itself if it allocates virtual memory after its registration.
2739 */
2740 void lttng_transport_register(struct lttng_transport *transport)
2741 {
2742 /*
2743 * Make sure no page fault can be triggered by the module about to be
2744 * registered. We deal with this here so we don't have to call
2745 * vmalloc_sync_all() in each module's init.
2746 */
2747 wrapper_vmalloc_sync_all();
2748
2749 mutex_lock(&sessions_mutex);
2750 list_add_tail(&transport->node, &lttng_transport_list);
2751 mutex_unlock(&sessions_mutex);
2752 }
2753 EXPORT_SYMBOL_GPL(lttng_transport_register);
2754
2755 /**
2756 * lttng_transport_unregister - LTT transport unregistration
2757 * @transport: transport structure
2758 */
2759 void lttng_transport_unregister(struct lttng_transport *transport)
2760 {
2761 mutex_lock(&sessions_mutex);
2762 list_del(&transport->node);
2763 mutex_unlock(&sessions_mutex);
2764 }
2765 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
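
/*
 * Typical usage from a ring-buffer client module (sketch only; see
 * lttng-events.h for the authoritative struct lttng_transport layout):
 *
 *	static struct lttng_transport my_transport = {
 *		.name = "relay-example",	// hypothetical transport name
 *		.owner = THIS_MODULE,
 *		.ops = { ... channel and event callbacks ... },
 *	};
 *
 *	lttng_transport_register(&my_transport);	// module init
 *	lttng_transport_unregister(&my_transport);	// module exit
 */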
2766
2767 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
2768
2769 enum cpuhp_state lttng_hp_prepare;
2770 enum cpuhp_state lttng_hp_online;
2771
2772 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
2773 {
2774 struct lttng_cpuhp_node *lttng_node;
2775
2776 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2777 switch (lttng_node->component) {
2778 case LTTNG_RING_BUFFER_FRONTEND:
2779 return 0;
2780 case LTTNG_RING_BUFFER_BACKEND:
2781 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
2782 case LTTNG_RING_BUFFER_ITER:
2783 return 0;
2784 case LTTNG_CONTEXT_PERF_COUNTERS:
2785 return 0;
2786 default:
2787 return -EINVAL;
2788 }
2789 }
2790
2791 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
2792 {
2793 struct lttng_cpuhp_node *lttng_node;
2794
2795 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2796 switch (lttng_node->component) {
2797 case LTTNG_RING_BUFFER_FRONTEND:
2798 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
2799 case LTTNG_RING_BUFFER_BACKEND:
2800 return 0;
2801 case LTTNG_RING_BUFFER_ITER:
2802 return 0;
2803 case LTTNG_CONTEXT_PERF_COUNTERS:
2804 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
2805 default:
2806 return -EINVAL;
2807 }
2808 }
2809
2810 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
2811 {
2812 struct lttng_cpuhp_node *lttng_node;
2813
2814 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2815 switch (lttng_node->component) {
2816 case LTTNG_RING_BUFFER_FRONTEND:
2817 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
2818 case LTTNG_RING_BUFFER_BACKEND:
2819 return 0;
2820 case LTTNG_RING_BUFFER_ITER:
2821 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
2822 case LTTNG_CONTEXT_PERF_COUNTERS:
2823 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
2824 default:
2825 return -EINVAL;
2826 }
2827 }
2828
2829 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
2830 {
2831 struct lttng_cpuhp_node *lttng_node;
2832
2833 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2834 switch (lttng_node->component) {
2835 case LTTNG_RING_BUFFER_FRONTEND:
2836 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
2837 case LTTNG_RING_BUFFER_BACKEND:
2838 return 0;
2839 case LTTNG_RING_BUFFER_ITER:
2840 return 0;
2841 case LTTNG_CONTEXT_PERF_COUNTERS:
2842 return 0;
2843 default:
2844 return -EINVAL;
2845 }
2846 }
2847
2848 static int __init lttng_init_cpu_hotplug(void)
2849 {
2850 int ret;
2851
2852 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
2853 lttng_hotplug_prepare,
2854 lttng_hotplug_dead);
2855 if (ret < 0) {
2856 return ret;
2857 }
2858 lttng_hp_prepare = ret;
2859 lttng_rb_set_hp_prepare(ret);
2860
2861 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
2862 lttng_hotplug_online,
2863 lttng_hotplug_offline);
2864 if (ret < 0) {
2865 cpuhp_remove_multi_state(lttng_hp_prepare);
2866 lttng_hp_prepare = 0;
2867 return ret;
2868 }
2869 lttng_hp_online = ret;
2870 lttng_rb_set_hp_online(ret);
2871
2872 return 0;
2873 }
2874
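/*
 * The two dynamic cpuhp states allocated above are only dispatch points:
 * the ring-buffer and perf-counter code attach their per-object hlist nodes
 * to them (e.g. via cpuhp_state_add_instance()), which is why the callbacks
 * above switch on lttng_node->component instead of doing any work directly.
 */
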
2875 static void __exit lttng_exit_cpu_hotplug(void)
2876 {
2877 lttng_rb_set_hp_online(0);
2878 cpuhp_remove_multi_state(lttng_hp_online);
2879 lttng_rb_set_hp_prepare(0);
2880 cpuhp_remove_multi_state(lttng_hp_prepare);
2881 }
2882
2883 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2884 static int lttng_init_cpu_hotplug(void)
2885 {
2886 return 0;
2887 }
2888 static void lttng_exit_cpu_hotplug(void)
2889 {
2890 }
2891 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2892
2893
2894 static int __init lttng_events_init(void)
2895 {
2896 int ret;
2897
2898 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
2899 if (ret)
2900 return ret;
2901 ret = wrapper_get_pfnblock_flags_mask_init();
2902 if (ret)
2903 return ret;
2904 ret = wrapper_get_pageblock_flags_mask_init();
2905 if (ret)
2906 return ret;
2907 ret = lttng_probes_init();
2908 if (ret)
2909 return ret;
2910 ret = lttng_context_init();
2911 if (ret)
2912 return ret;
2913 ret = lttng_tracepoint_init();
2914 if (ret)
2915 goto error_tp;
2916 event_cache = KMEM_CACHE(lttng_event, 0);
2917 if (!event_cache) {
2918 ret = -ENOMEM;
2919 goto error_kmem;
2920 }
2921 ret = lttng_abi_init();
2922 if (ret)
2923 goto error_abi;
2924 ret = lttng_logger_init();
2925 if (ret)
2926 goto error_logger;
2927 ret = lttng_init_cpu_hotplug();
2928 if (ret)
2929 goto error_hotplug;
2930 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
2931 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2932 __stringify(LTTNG_MODULES_MINOR_VERSION),
2933 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2934 LTTNG_MODULES_EXTRAVERSION,
2935 LTTNG_VERSION_NAME,
2936 #ifdef LTTNG_EXTRA_VERSION_GIT
2937 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2938 #else
2939 "",
2940 #endif
2941 #ifdef LTTNG_EXTRA_VERSION_NAME
2942 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2943 #else
2944 "");
2945 #endif
2946 return 0;
2947
2948 error_hotplug:
2949 lttng_logger_exit();
2950 error_logger:
2951 lttng_abi_exit();
2952 error_abi:
2953 kmem_cache_destroy(event_cache);
2954 error_kmem:
2955 lttng_tracepoint_exit();
2956 error_tp:
2957 lttng_context_exit();
2958 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
2959 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2960 __stringify(LTTNG_MODULES_MINOR_VERSION),
2961 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2962 LTTNG_MODULES_EXTRAVERSION,
2963 LTTNG_VERSION_NAME,
2964 #ifdef LTTNG_EXTRA_VERSION_GIT
2965 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2966 #else
2967 "",
2968 #endif
2969 #ifdef LTTNG_EXTRA_VERSION_NAME
2970 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2971 #else
2972 "");
2973 #endif
2974 return ret;
2975 }
2976
2977 module_init(lttng_events_init);
2978
2979 static void __exit lttng_events_exit(void)
2980 {
2981 struct lttng_session *session, *tmpsession;
2982
2983 lttng_exit_cpu_hotplug();
2984 lttng_logger_exit();
2985 lttng_abi_exit();
2986 list_for_each_entry_safe(session, tmpsession, &sessions, list)
2987 lttng_session_destroy(session);
2988 kmem_cache_destroy(event_cache);
2989 lttng_tracepoint_exit();
2990 lttng_context_exit();
2991 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
2992 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2993 __stringify(LTTNG_MODULES_MINOR_VERSION),
2994 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2995 LTTNG_MODULES_EXTRAVERSION,
2996 LTTNG_VERSION_NAME,
2997 #ifdef LTTNG_EXTRA_VERSION_GIT
2998 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2999 #else
3000 "",
3001 #endif
3002 #ifdef LTTNG_EXTRA_VERSION_NAME
3003 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3004 #else
3005 "");
3006 #endif
3007 }
3008
3009 module_exit(lttng_events_exit);
3010
3011 #include "extra_version/patches.i"
3012 #ifdef LTTNG_EXTRA_VERSION_GIT
3013 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
3014 #endif
3015 #ifdef LTTNG_EXTRA_VERSION_NAME
3016 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
3017 #endif
3018 MODULE_LICENSE("GPL and additional rights");
3019 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
3020 MODULE_DESCRIPTION("LTTng tracer");
3021 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
3022 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
3023 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
3024 LTTNG_MODULES_EXTRAVERSION);