/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
 * overrides a function with a define.
 */
#include "wrapper/page_alloc.h"
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/utsname.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <wrapper/file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/dmi.h>

#include <wrapper/compiler_attributes.h>
#include <wrapper/uuid.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/random.h>
#include <wrapper/tracepoint.h>
#include <wrapper/list.h>
#include <wrapper/types.h>
#include <lttng/kernel-version.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <lttng/lttng-bytecode.h>
#include <lttng/tracer.h>
#include <lttng/event-notifier-notification.h>
#include <lttng/abi-old.h>
#include <lttng/endian.h>
#include <lttng/string-utils.h>
#include <lttng/utils.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
#include <wrapper/time.h>
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
#include <linux/stdarg.h>
#else
#include <stdarg.h>
#endif

#define METADATA_CACHE_DEFAULT_SIZE 4096
static LIST_HEAD(sessions);
static LIST_HEAD(event_notifier_groups);
static LIST_HEAD(lttng_transport_list);
static LIST_HEAD(lttng_counter_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_recorder_cache;
static struct kmem_cache *event_recorder_private_cache;
static struct kmem_cache *event_notifier_cache;
static struct kmem_cache *event_notifier_private_cache;
static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);

static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
static void _lttng_event_unregister(struct lttng_kernel_event_common *event);
static
int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event);
static
int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_common *type,
		enum lttng_kernel_string_encoding parent_encoding,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting, const char **prev_field_name_p);
void synchronize_trace(void)
{
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
}
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}

void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
static struct lttng_transport *lttng_transport_find(const char *name)
{
	struct lttng_transport *transport;

	list_for_each_entry(transport, &lttng_transport_list, node) {
		if (!strcmp(transport->name, name))
			return transport;
	}
	return NULL;
}
/*
 * Called with sessions lock held.
 */
int lttng_session_active(void)
{
	struct lttng_kernel_session_private *iter;

	list_for_each_entry(iter, &sessions, list) {
		if (iter->pub->active)
			return 1;
	}
	return 0;
}
struct lttng_kernel_session *lttng_session_create(void)
{
	struct lttng_kernel_session *session;
	struct lttng_kernel_session_private *session_priv;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		goto err;
	session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
	if (!session_priv)
		goto err_free_session;
	session->priv = session_priv;
	session_priv->pub = session;

	INIT_LIST_HEAD(&session_priv->chan);
	INIT_LIST_HEAD(&session_priv->events);
	lttng_guid_gen(&session_priv->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session_private;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session_priv->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	memcpy(&metadata_cache->uuid, &session_priv->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session_priv->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
	list_add(&session_priv->list, &sessions);

	if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
		goto tracker_alloc_error;
	if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
		goto tracker_alloc_error;
	if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
		goto tracker_alloc_error;
	if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
		goto tracker_alloc_error;
	if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
		goto tracker_alloc_error;
	if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
		goto tracker_alloc_error;

	mutex_unlock(&sessions_mutex);

	return session;

tracker_alloc_error:
	lttng_id_tracker_fini(&session->pid_tracker);
	lttng_id_tracker_fini(&session->vpid_tracker);
	lttng_id_tracker_fini(&session->uid_tracker);
	lttng_id_tracker_fini(&session->vuid_tracker);
	lttng_id_tracker_fini(&session->gid_tracker);
	lttng_id_tracker_fini(&session->vgid_tracker);
err_free_cache:
	kfree(metadata_cache);
err_free_session_private:
	lttng_kvfree(session_priv);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
static
struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
{
	struct lttng_counter_transport *transport;

	list_for_each_entry(transport, &lttng_counter_transport_list, node) {
		if (!strcmp(transport->name, name))
			return transport;
	}
	return NULL;
}
struct lttng_counter *lttng_kernel_counter_create(
		const char *counter_transport_name,
		size_t number_dimensions, const size_t *dimensions_sizes)
{
	struct lttng_counter *counter = NULL;
	struct lttng_counter_transport *counter_transport = NULL;

	counter_transport = lttng_counter_transport_find(counter_transport_name);
	if (!counter_transport) {
		printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
				counter_transport_name);
		goto notransport;
	}
	if (!try_module_get(counter_transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
		goto notransport;
	}

	counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
	if (!counter)
		goto nomem;

	/* Create event notifier error counter. */
	counter->ops = &counter_transport->ops;
	counter->transport = counter_transport;

	counter->counter = counter->ops->counter_create(
			number_dimensions, dimensions_sizes, 0);
	if (!counter->counter) {
		goto create_error;
	}
	return counter;

create_error:
	lttng_kvfree(counter);
nomem:
	if (counter_transport)
		module_put(counter_transport->owner);
notransport:
	return NULL;
}
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.priv->channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
static
void metadata_cache_destroy(struct kref *kref)
{
	struct lttng_metadata_cache *cache =
		container_of(kref, struct lttng_metadata_cache, refcount);
	vfree(cache->data);
	kfree(cache);
}
void lttng_session_destroy(struct lttng_kernel_session *session)
{
	struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
	struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
		WARN_ON(ret);
	}
	list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node)
		_lttng_event_unregister(&event_recorder_priv->pub->parent);
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
		_lttng_event_destroy(&event_recorder_priv->pub->parent);
	list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
		BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan_priv->pub);
	}
	mutex_lock(&session->priv->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->priv->metadata_cache->lock);
	lttng_id_tracker_fini(&session->pid_tracker);
	lttng_id_tracker_fini(&session->vpid_tracker);
	lttng_id_tracker_fini(&session->uid_tracker);
	lttng_id_tracker_fini(&session->vuid_tracker);
	lttng_id_tracker_fini(&session->gid_tracker);
	lttng_id_tracker_fini(&session->vgid_tracker);
	kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->priv->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session->priv);
	lttng_kvfree(session);
}
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
	struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
			&event_notifier_group->event_notifiers_head, parent.node)
		_lttng_event_unregister(&event_notifier_priv->pub->parent);

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	irq_work_sync(&event_notifier_group->wakeup_pending);

	ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
	WARN_ON(ret);

	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);

	list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
			&event_notifier_group->event_notifiers_head, parent.node)
		_lttng_event_destroy(&event_notifier_priv->pub->parent);

	if (event_notifier_group->error_counter) {
		struct lttng_counter *error_counter = event_notifier_group->error_counter;

		error_counter->ops->counter_destroy(error_counter->counter);
		module_put(error_counter->transport->owner);
		lttng_kvfree(error_counter);
		event_notifier_group->error_counter = NULL;
	}

	event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
int lttng_session_statedump(struct lttng_kernel_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_session_enable(struct lttng_kernel_session *session)
{
	int ret = 0;
	struct lttng_kernel_channel_buffer_private *chan_priv;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->priv->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		if (chan_priv->header_type)
			continue;		/* don't change it if session stop/restart */
		if (chan_priv->free_event_id < 31)
			chan_priv->header_type = 1;	/* compact */
		else
			chan_priv->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		if (chan_priv->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->priv->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_session_disable(struct lttng_kernel_session *session)
{
	int ret = 0;
	struct lttng_kernel_channel_buffer_private *chan_priv;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->priv->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		if (chan_priv->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
{
	int ret = 0;
	struct lttng_kernel_channel_buffer_private *chan_priv;
	struct lttng_kernel_event_recorder_private *event_recorder_priv;
	struct lttng_metadata_cache *cache = session->priv->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	session->priv->metadata_dumped = 0;
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		chan_priv->metadata_dumped = 0;
	}

	list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
		event_recorder_priv->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
static
bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
{
	struct lttng_kernel_channel_buffer *chan_buf;

	if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
		return false;
	chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
	if (chan_buf->priv->channel_type == METADATA_CHANNEL)
		return true;
	return false;
}
int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (is_channel_buffer_metadata(channel)) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->priv->tstate = 1;
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (is_channel_buffer_metadata(channel)) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->priv->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_event_enable(struct lttng_kernel_event_common *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
	{
		struct lttng_kernel_event_recorder *event_recorder =
				container_of(event, struct lttng_kernel_event_recorder, parent);

		if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
			ret = -EPERM;
			goto end;
		}
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
		switch (event->priv->instrumentation) {
		case LTTNG_KERNEL_ABI_KRETPROBE:
			ret = -EINVAL;
			goto end;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = -EINVAL;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_UPROBE:
		WRITE_ONCE(event->enabled, 1);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_event_disable(struct lttng_kernel_event_common *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
	{
		struct lttng_kernel_event_recorder *event_recorder =
				container_of(event, struct lttng_kernel_event_recorder, parent);

		if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
			ret = -EPERM;
			goto end;
		}
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
		switch (event->priv->instrumentation) {
		case LTTNG_KERNEL_ABI_KRETPROBE:
			ret = -EINVAL;
			goto end;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = -EINVAL;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_UPROBE:
		WRITE_ONCE(event->enabled, 0);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_kernel_channel_buffer *chan;
	struct lttng_kernel_channel_buffer_private *chan_priv;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->priv->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
	if (!chan_priv) {
		kfree(chan);
		goto nomem;
	}
	chan->priv = chan_priv;
	chan_priv->pub = chan;
	chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
	chan->parent.session = session;
	chan->priv->id = session->priv->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->priv->rb_chan)
		goto create_error;
	chan->priv->parent.tstate = 1;
	chan->parent.enabled = 1;
	chan->priv->transport = transport;
	chan->priv->channel_type = channel_type;
	list_add(&chan->priv->node, &session->priv->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan_priv);
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
{
	chan->ops->priv->channel_destroy(chan->priv->rb_chan);
	module_put(chan->priv->transport->owner);
	list_del(&chan->priv->node);
	lttng_kernel_destroy_context(chan->priv->ctx);
	kfree(chan->priv);
	kfree(chan);
}
void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
{
	BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
static
bool lttng_kernel_event_id_available(struct lttng_event_enabler_common *event_enabler)
{
	struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
	enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;

	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
	{
		struct lttng_event_recorder_enabler *event_recorder_enabler =
			container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
		struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;

		switch (itype) {
		case LTTNG_KERNEL_ABI_TRACEPOINT:
			lttng_fallthrough;
		case LTTNG_KERNEL_ABI_KPROBE:
			lttng_fallthrough;
		case LTTNG_KERNEL_ABI_SYSCALL:
			lttng_fallthrough;
		case LTTNG_KERNEL_ABI_UPROBE:
			if (chan->priv->free_event_id == -1U)
				return false;
			return true;
		case LTTNG_KERNEL_ABI_KRETPROBE:
			/* kretprobes require 2 event IDs. */
			if (chan->priv->free_event_id >= -2U)
				return false;
			return true;
		default:
			WARN_ON_ONCE(1);
			return false;
		}
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
		return true;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
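
/*
 * Note on the unsigned arithmetic above: free_event_id is an unsigned
 * counter, so -1U is its maximum value. Instrumentation types needing a
 * single ID only fail once the counter reaches -1U, while kretprobe needs
 * two IDs (entry + return) and therefore already fails at -2U.
 */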
static
struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
{
	struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
	enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;

	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
	{
		struct lttng_event_recorder_enabler *event_recorder_enabler =
			container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
		struct lttng_kernel_event_recorder *event_recorder;
		struct lttng_kernel_event_recorder_private *event_recorder_priv;
		struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;

		event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
		if (!event_recorder)
			return NULL;
		event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
		if (!event_recorder_priv) {
			/* Free the public object back to the cache it came from. */
			kmem_cache_free(event_recorder_cache, event_recorder);
			return NULL;
		}
		event_recorder_priv->pub = event_recorder;
		event_recorder_priv->parent.pub = &event_recorder->parent;
		event_recorder->priv = event_recorder_priv;
		event_recorder->parent.priv = &event_recorder_priv->parent;

		event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
		event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
		event_recorder->priv->parent.instrumentation = itype;
		INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
		INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);

		event_recorder->chan = chan;
		event_recorder->priv->id = chan->priv->free_event_id++;
		return &event_recorder->parent;
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
		struct lttng_kernel_event_notifier *event_notifier;
		struct lttng_kernel_event_notifier_private *event_notifier_priv;

		event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
		if (!event_notifier)
			return NULL;
		event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
		if (!event_notifier_priv) {
			/* Free the public object back to the cache it came from. */
			kmem_cache_free(event_notifier_cache, event_notifier);
			return NULL;
		}
		event_notifier_priv->pub = event_notifier;
		event_notifier_priv->parent.pub = &event_notifier->parent;
		event_notifier->priv = event_notifier_priv;
		event_notifier->parent.priv = &event_notifier_priv->parent;

		event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
		event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
		event_notifier->priv->parent.instrumentation = itype;
		event_notifier->priv->parent.user_token = event_enabler->user_token;
		INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
		INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);

		event_notifier->priv->group = event_notifier_enabler->group;
		event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
		event_notifier->priv->num_captures = 0;
		event_notifier->notification_send = lttng_event_notifier_notification_send;
		INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
		return &event_notifier->parent;
	}
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}
static
void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
{
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
	{
		struct lttng_kernel_event_recorder *event_recorder =
			container_of(event, struct lttng_kernel_event_recorder, parent);

		kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
		kmem_cache_free(event_recorder_cache, event_recorder);
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
	{
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);

		kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
		kmem_cache_free(event_notifier_cache, event_notifier);
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
}
static
int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
{
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
		return 0;
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
	{
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);
		struct lttng_counter *error_counter;
		struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
		size_t dimension_index[1];
		int ret;

		/*
		 * Clear the error counter bucket. The sessiond keeps track of which
		 * bucket is currently in use. We trust it. The session lock
		 * synchronizes against concurrent creation of the error
		 * counter.
		 */
		error_counter = event_notifier_group->error_counter;
		if (!error_counter)
			return 0;
		/*
		 * Check that the index is within the boundary of the counter.
		 */
		if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
			printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
				event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
			return -EINVAL;
		}

		dimension_index[0] = event_notifier->priv->error_counter_index;
		ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
		if (ret) {
			printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
				event_notifier->priv->error_counter_index);
			return -EINVAL;
		}
		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 */
static
struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
				const struct lttng_kernel_event_desc *event_desc)
{
	struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
	struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
	struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
	enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
	struct lttng_kernel_event_common_private *event_priv;
	struct lttng_kernel_event_common *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	if (!lttng_kernel_event_id_available(event_enabler)) {
		ret = -EMFILE;
		goto full;
	}

	switch (itype) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		event_name = event_desc->event_name;
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_UPROBE:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_KRETPROBE:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_SYSCALL:
		event_name = event_param->name;
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
		if (lttng_event_enabler_event_name_match_event(event_enabler, event_name, event_priv->pub)) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = lttng_kernel_event_alloc(event_enabler);
	if (!event) {
		ret = -ENOMEM;
		goto alloc_error;
	}

	switch (itype) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->priv->registered = 0;
		event->priv->desc = lttng_event_desc_get(event_name);
		if (!event->priv->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->priv->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		ret = try_module_get(event->priv->desc->owner);
		WARN_ON_ONCE(!ret);
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
	{
		struct lttng_kernel_event_common *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->priv->registered = 1;

		event_return = lttng_kernel_event_alloc(event_enabler);
		if (!event_return) {
			ret = -ENOMEM;
			goto alloc_error;
		}
		event_return->enabled = 0;
		event_return->priv->registered = 1;

		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			lttng_kernel_event_free(event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->priv->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event_return->priv->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_recorder_metadata_statedump(event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			lttng_kernel_event_free(event_return);
			module_put(event_return->priv->desc->owner);
			module_put(event->priv->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->priv->node, event_list_head);
		break;
	}

	case LTTNG_KERNEL_ABI_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->priv->registered = 0;
		event->priv->desc = event_desc;
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
			event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
			event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
			event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
			event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->priv->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;

	case LTTNG_KERNEL_ABI_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->priv->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->priv->desc->owner);
		WARN_ON_ONCE(!ret);
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}

	ret = _lttng_event_recorder_metadata_statedump(event);
	WARN_ON_ONCE(ret > 0);
	if (ret)
		goto statedump_error;

	ret = lttng_kernel_event_notifier_clear_error_counter(event);
	if (ret)
		goto register_error;

	hlist_add_head(&event->priv->hlist_node, head);
	list_add(&event->priv->node, event_list_head);

	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	lttng_kernel_event_free(event);
alloc_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
				const struct lttng_kernel_event_desc *event_desc)
{
	struct lttng_kernel_event_common *event;

	mutex_lock(&sessions_mutex);
	event = _lttng_kernel_event_create(event_enabler, event_desc);
	mutex_unlock(&sessions_mutex);
	return event;
}
int lttng_kernel_counter_read(struct lttng_counter *counter,
		const size_t *dim_indexes, int32_t cpu,
		int64_t *val, bool *overflow, bool *underflow)
{
	return counter->ops->counter_read(counter->counter, dim_indexes,
			cpu, val, overflow, underflow);
}

int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
		const size_t *dim_indexes, int64_t *val,
		bool *overflow, bool *underflow)
{
	return counter->ops->counter_aggregate(counter->counter, dim_indexes,
			val, overflow, underflow);
}

int lttng_kernel_counter_clear(struct lttng_counter *counter,
		const size_t *dim_indexes)
{
	return counter->ops->counter_clear(counter->counter, dim_indexes);
}
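
/*
 * Illustrative sketch of reading back one bucket of a one-dimensional
 * counter ("counter" here stands for a hypothetical caller-owned
 * struct lttng_counter):
 *
 *	size_t index[1] = { 42 };
 *	int64_t val;
 *	bool overflow, underflow;
 *	int ret;
 *
 *	ret = lttng_kernel_counter_aggregate(counter, index, &val,
 *			&overflow, &underflow);
 */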
/* Only used for tracepoints and system calls for now. */
static
void register_event(struct lttng_kernel_event_common *event)
{
	const struct lttng_kernel_event_desc *desc;
	int ret = -EINVAL;

	WARN_ON_ONCE(event->priv->registered);

	desc = event->priv->desc;
	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
						  desc->tp_class->probe_callback,
						  event);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = lttng_syscall_filter_enable_event(event);
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_UPROBE:
		ret = 0;
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
		switch (event->type) {
		case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
			ret = 0;
			break;
		case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
			WARN_ON_ONCE(1);
			break;
		}
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->priv->registered = 1;
}
static
void unregister_event(struct lttng_kernel_event_common *event)
{
	struct lttng_kernel_event_common_private *event_priv = event->priv;
	const struct lttng_kernel_event_desc *desc;
	int ret = -EINVAL;

	WARN_ON_ONCE(!event->priv->registered);

	desc = event_priv->desc;
	switch (event_priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
						  event_priv->desc->tp_class->probe_callback,
						  event);
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		lttng_kprobes_unregister_event(event);
		ret = 0;
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
		switch (event->type) {
		case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
			lttng_kretprobes_unregister(event);
			ret = 0;
			break;
		case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
			WARN_ON_ONCE(1);
			break;
		}
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = lttng_syscall_filter_disable_event(event);
		break;

	case LTTNG_KERNEL_ABI_NOOP:
		switch (event->type) {
		case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
			ret = 0;
			break;
		case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
			WARN_ON_ONCE(1);
			break;
		}
		break;

	case LTTNG_KERNEL_ABI_UPROBE:
		lttng_uprobes_unregister_event(event);
		ret = 0;
		break;

	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event_priv->registered = 0;
}

static
void _lttng_event_unregister(struct lttng_kernel_event_common *event)
{
	if (event->priv->registered)
		unregister_event(event);
}
/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_kernel_event_common *event)
{
	struct lttng_kernel_event_common_private *event_priv = event->priv;
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
				 &event_priv->enablers_ref_head, node)
		kfree(enabler_ref);

	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
	{
		struct lttng_kernel_event_recorder *event_recorder =
			container_of(event, struct lttng_kernel_event_recorder, parent);

		switch (event_priv->instrumentation) {
		case LTTNG_KERNEL_ABI_TRACEPOINT:
			lttng_event_desc_put(event_priv->desc);
			break;

		case LTTNG_KERNEL_ABI_KPROBE:
			module_put(event_priv->desc->owner);
			lttng_kprobes_destroy_event_private(&event_recorder->parent);
			break;

		case LTTNG_KERNEL_ABI_KRETPROBE:
			module_put(event_priv->desc->owner);
			lttng_kretprobes_destroy_private(&event_recorder->parent);
			break;

		case LTTNG_KERNEL_ABI_SYSCALL:
			break;

		case LTTNG_KERNEL_ABI_UPROBE:
			module_put(event_priv->desc->owner);
			lttng_uprobes_destroy_event_private(&event_recorder->parent);
			break;

		case LTTNG_KERNEL_ABI_FUNCTION:
			lttng_fallthrough;
		case LTTNG_KERNEL_ABI_NOOP:
			lttng_fallthrough;
		default:
			WARN_ON_ONCE(1);
		}
		list_del(&event_recorder->priv->parent.node);
		kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
		kmem_cache_free(event_recorder_cache, event_recorder);
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
	{
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);

		switch (event_notifier->priv->parent.instrumentation) {
		case LTTNG_KERNEL_ABI_TRACEPOINT:
			lttng_event_desc_put(event_notifier->priv->parent.desc);
			break;

		case LTTNG_KERNEL_ABI_KPROBE:
			module_put(event_notifier->priv->parent.desc->owner);
			lttng_kprobes_destroy_event_private(&event_notifier->parent);
			break;

		case LTTNG_KERNEL_ABI_SYSCALL:
			break;

		case LTTNG_KERNEL_ABI_UPROBE:
			module_put(event_notifier->priv->parent.desc->owner);
			lttng_uprobes_destroy_event_private(&event_notifier->parent);
			break;

		case LTTNG_KERNEL_ABI_KRETPROBE:
			lttng_fallthrough;
		case LTTNG_KERNEL_ABI_FUNCTION:
			lttng_fallthrough;
		case LTTNG_KERNEL_ABI_NOOP:
			lttng_fallthrough;
		default:
			WARN_ON_ONCE(1);
		}
		list_del(&event_notifier->priv->parent.node);
		kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
		kmem_cache_free(event_notifier_cache, event_notifier);
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
}
static
struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
		enum tracker_type tracker_type)
{
	switch (tracker_type) {
	case TRACKER_PID:
		return &session->pid_tracker;
	case TRACKER_VPID:
		return &session->vpid_tracker;
	case TRACKER_UID:
		return &session->uid_tracker;
	case TRACKER_VUID:
		return &session->vuid_tracker;
	case TRACKER_GID:
		return &session->gid_tracker;
	case TRACKER_VGID:
		return &session->vgid_tracker;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}
int lttng_session_track_id(struct lttng_kernel_session *session,
		enum tracker_type tracker_type, int id)
{
	struct lttng_kernel_id_tracker *tracker;
	int ret;

	tracker = get_tracker(session, tracker_type);
	if (!tracker)
		return -EINVAL;
	if (id < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (id == -1) {
		/* track all ids: destroy tracker. */
		lttng_id_tracker_destroy(tracker, true);
		ret = 0;
	} else {
		ret = lttng_id_tracker_add(tracker, id);
	}
	mutex_unlock(&sessions_mutex);
	return ret;
}
int lttng_session_untrack_id(struct lttng_kernel_session *session,
		enum tracker_type tracker_type, int id)
{
	struct lttng_kernel_id_tracker *tracker;
	int ret;

	tracker = get_tracker(session, tracker_type);
	if (!tracker)
		return -EINVAL;
	if (id < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (id == -1) {
		/* untrack all ids: replace by empty tracker. */
		ret = lttng_id_tracker_empty_set(tracker);
	} else {
		ret = lttng_id_tracker_del(tracker, id);
	}
	mutex_unlock(&sessions_mutex);
	return ret;
}
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
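
/*
 * Note: the seq_file iterator above either walks the tracker hash table
 * entry by entry, or, when no table is attached, hands back the
 * id_tracker_p pointer itself so that id_list_show() can tell the two
 * cases apart.
 */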
/* Called with sessions_mutex held. */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}
	/* End of list */
	return NULL;
}
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	switch (id_tracker->priv->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->priv->session->priv->file);
	return ret;
}
static
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->priv->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
/*
 * Enabler management.
 */
static
int lttng_match_enabler_star_glob(const char *desc_name,
		const char *pattern)
{
	if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
			desc_name, LTTNG_SIZE_MAX))
		return 0;
	return 1;
}
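
/*
 * Example (illustrative): an enabler using LTTNG_ENABLER_FORMAT_STAR_GLOB
 * with the pattern "sched_*" matches event descriptions such as
 * "sched_switch", whereas LTTNG_ENABLER_FORMAT_NAME requires an exact
 * string match.
 */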
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	if (strcmp(desc_name, name))
		return 0;
	return 1;
}
static
int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
		struct lttng_event_enabler_common *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		desc_name = desc->event_name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		desc_name = desc->event_name;
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;

	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
static
bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
		struct lttng_event_enabler_common *enabler)
{
	int ret;

	ret = lttng_desc_match_enabler_check(desc, enabler);
	if (ret < 0) {
		WARN_ON_ONCE(1);
		return false;
	}
	return ret;
}
bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
		struct lttng_kernel_event_common *event)
{
	if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
		return false;

	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
	{
		struct lttng_event_recorder_enabler *event_recorder_enabler =
			container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
		struct lttng_kernel_event_recorder *event_recorder =
			container_of(event, struct lttng_kernel_event_recorder, parent);

		if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
				&& event_recorder->chan == event_recorder_enabler->chan)
			return true;
		else
			return false;
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);

		if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
				&& event_notifier->priv->group == event_notifier_enabler->group
				&& event->priv->user_token == event_enabler->user_token)
			return true;
		else
			return false;
	}
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
*event_enabler
,
1976 const struct lttng_kernel_event_desc
*desc
,
1977 struct lttng_kernel_event_common
*event
)
1979 if (event_enabler
->event_param
.instrumentation
!= event
->priv
->instrumentation
)
1982 switch (event_enabler
->enabler_type
) {
1983 case LTTNG_EVENT_ENABLER_TYPE_RECORDER
:
1985 struct lttng_event_recorder_enabler
*event_recorder_enabler
=
1986 container_of(event_enabler
, struct lttng_event_recorder_enabler
, parent
);
1987 struct lttng_kernel_event_recorder
*event_recorder
=
1988 container_of(event
, struct lttng_kernel_event_recorder
, parent
);
1990 if (event
->priv
->desc
== desc
&& event_recorder
->chan
== event_recorder_enabler
->chan
)
1995 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
:
1997 struct lttng_event_notifier_enabler
*event_notifier_enabler
=
1998 container_of(event_enabler
, struct lttng_event_notifier_enabler
, parent
);
1999 struct lttng_kernel_event_notifier
*event_notifier
=
2000 container_of(event
, struct lttng_kernel_event_notifier
, parent
);
2002 if (event
->priv
->desc
== desc
2003 && event_notifier
->priv
->group
== event_notifier_enabler
->group
2004 && event
->priv
->user_token
== event_enabler
->user_token
)
bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
		const char *event_name,
		struct lttng_kernel_event_common *event)
{
	if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
		return false;

	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
	{
		struct lttng_event_recorder_enabler *event_recorder_enabler =
			container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
		struct lttng_kernel_event_recorder *event_recorder =
			container_of(event, struct lttng_kernel_event_recorder, parent);

		if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
				&& event_recorder->chan == event_recorder_enabler->chan)
			return true;
		else
			return false;
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);

		if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
				&& event_notifier->priv->group == event_notifier_enabler->group
				&& event->priv->user_token == event_enabler->user_token)
			return true;
		else
			return false;
	}
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
static
struct lttng_enabler_ref *lttng_enabler_ref(
		struct list_head *enablers_ref_list,
		struct lttng_event_enabler_common *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	list_for_each_entry(enabler_ref, enablers_ref_list, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}
static
void lttng_event_enabler_create_tracepoint_events_if_missing(struct lttng_event_enabler_common *event_enabler)
{
	struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
	struct lttng_kernel_probe_desc *probe_desc;
	const struct lttng_kernel_event_desc *desc;
	struct list_head *probe_list;
	int i;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			struct lttng_kernel_event_common *event;
			struct lttng_kernel_event_common_private *event_priv;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, event_enabler))
				continue;

			/*
			 * Check if already created.
			 */
			head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
			lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
				if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub)) {
					found = 1;
					break;
				}
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this event probe.
			 */
			event = _lttng_kernel_event_create(event_enabler, desc);
			if (IS_ERR(event)) {
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->event_name);
			}
		}
	}
}
/*
 * Create event if it is missing and present in the list of tracepoint probes.
 * Should be called with sessions mutex held.
 */
static
void lttng_event_enabler_create_events_if_missing(struct lttng_event_enabler_common *event_enabler)
{
	int ret;

	switch (event_enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		lttng_event_enabler_create_tracepoint_events_if_missing(event_enabler);
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = lttng_event_enabler_create_syscall_events_if_missing(event_enabler);
		WARN_ON_ONCE(ret);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
*event_enabler
,
2148 struct lttng_kernel_event_common
*event
)
2150 /* Link filter bytecodes if not linked yet. */
2151 lttng_enabler_link_bytecode(event
->priv
->desc
, lttng_static_ctx
,
2152 &event
->priv
->filter_bytecode_runtime_head
, &event_enabler
->filter_bytecode_head
);
static
void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
		struct lttng_kernel_event_common *event)
{
	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
		break;
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);

		/* Link capture bytecodes if not linked yet. */
		lttng_enabler_link_bytecode(event->priv->desc,
			lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
			&event_notifier_enabler->capture_bytecode_head);
		event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
}
/*
 * Create events associated with an event_enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 */
int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
{
	struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
	struct lttng_kernel_event_common_private *event_priv;

	lttng_syscall_table_set_wildcard_all(event_enabler);

	/* First ensure that probe events are created for this enabler. */
	lttng_event_enabler_create_events_if_missing(event_enabler);

	/* Link the created event with its associated enabler. */
	list_for_each_entry(event_priv, event_list_head, node) {
		struct lttng_kernel_event_common *event = event_priv->pub;
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_enabler_match_event(event_enabler, event))
			continue;

		enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event_notifier to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = event_enabler;
			list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
		}

		lttng_event_enabler_init_event_filter(event_enabler, event);
		lttng_event_enabler_init_event_capture(event_enabler, event);
	}
	return 0;
}
/*
 * Called at module load: connect the probe on all enablers matching
 * this event.
 * Called with sessions lock held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_kernel_session_private *session_priv;

	list_for_each_entry(session_priv, &sessions, list)
		lttng_session_lazy_sync_event_enablers(session_priv->pub);
	return 0;
}
static bool lttng_event_notifier_group_has_active_event_notifiers(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_enabler_common *event_enabler;

	list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
		if (event_enabler->enabled)
			return true;
	}
	return false;
}

bool lttng_event_notifier_active(void)
{
	struct lttng_event_notifier_group *event_notifier_group;

	list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
		if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
			return true;
	}
	return false;
}

int lttng_fix_pending_event_notifiers(void)
{
	struct lttng_event_notifier_group *event_notifier_group;

	list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
		lttng_event_notifier_group_sync_enablers(event_notifier_group);
	return 0;
}
struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
		enum lttng_enabler_format_type format_type,
		struct lttng_kernel_abi_event *event_param,
		struct lttng_kernel_channel_buffer *chan)
{
	struct lttng_event_recorder_enabler *event_enabler;

	event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
	if (!event_enabler)
		return NULL;
	event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
	event_enabler->parent.format_type = format_type;
	INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
	memcpy(&event_enabler->parent.event_param, event_param,
		sizeof(event_enabler->parent.event_param));
	event_enabler->chan = chan;
	/* ctx left NULL */
	event_enabler->parent.enabled = 0;
	return event_enabler;
}
void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
		struct lttng_event_recorder_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
	list_add(&event_enabler->parent.node, &session->priv->enablers_head);
	event_enabler->parent.published = true;
	lttng_session_lazy_sync_event_enablers(session);
	mutex_unlock(&sessions_mutex);
}

int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
{
	mutex_lock(&sessions_mutex);
	event_enabler->enabled = 1;
	lttng_event_enabler_sync(event_enabler);
	mutex_unlock(&sessions_mutex);
	return 0;
}

int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
{
	mutex_lock(&sessions_mutex);
	event_enabler->enabled = 0;
	lttng_event_enabler_sync(event_enabler);
	mutex_unlock(&sessions_mutex);
	return 0;
}
static
int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
		struct lttng_kernel_abi_filter_bytecode __user *bytecode)
{
	struct lttng_kernel_bytecode_node *bytecode_node;
	uint32_t bytecode_len;
	int ret;

	ret = get_user(bytecode_len, &bytecode->len);
	if (ret)
		return ret;
	bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
			GFP_KERNEL);
	if (!bytecode_node)
		return -ENOMEM;
	ret = copy_from_user(&bytecode_node->bc, bytecode,
		sizeof(*bytecode) + bytecode_len);
	if (ret)
		goto error_free;

	bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
	bytecode_node->enabler = enabler;
	/* Enforce length based on allocated size */
	bytecode_node->bc.len = bytecode_len;
	list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);

	return 0;

error_free:
	lttng_kvfree(bytecode_node);
	return ret;
}

int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
		struct lttng_kernel_abi_filter_bytecode __user *bytecode)
{
	int ret;

	ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
	if (ret)
		goto error;

	lttng_event_enabler_sync(event_enabler);
	return 0;

error:
	return ret;
}
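/*
 * Illustrative note (not part of the original ABI headers): the filter
 * bytecode object copied above is a variable-size object, i.e. a fixed
 * header immediately followed by "len" bytes of bytecode, which is why the
 * copy covers sizeof(*bytecode) + bytecode_len. A minimal sketch of that
 * layout, with hypothetical field names:
 *
 *	struct abi_bytecode_sketch {
 *		uint32_t len;		// length of the data[] payload
 *		uint32_t reloc_offset;	// hypothetical header field
 *		char data[];		// bytecode bytes follow the header
 *	};
 */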
int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
		struct lttng_kernel_abi_event_callsite __user *callsite)
{
	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_UPROBE:
		return lttng_uprobes_event_add_callsite(event, callsite);
	default:
		return -EINVAL;
	}
}

static
void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
{
	struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;

	/* Destroy filter bytecode */
	list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		lttng_kvfree(filter_node);
	}
}
void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
{
	lttng_enabler_destroy(event_enabler);
	if (event_enabler->published)
		list_del(&event_enabler->node);

	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
	{
		struct lttng_event_recorder_enabler *event_recorder_enabler =
			container_of(event_enabler, struct lttng_event_recorder_enabler, parent);

		kfree(event_recorder_enabler);
		break;
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);

		kfree(event_notifier_enabler);
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
}
struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
		enum lttng_enabler_format_type format_type,
		struct lttng_kernel_abi_event_notifier *event_notifier_param,
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;

	event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
	if (!event_notifier_enabler)
		return NULL;

	event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
	event_notifier_enabler->parent.format_type = format_type;
	INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
	INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);

	event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
	event_notifier_enabler->num_captures = 0;

	memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
		sizeof(event_notifier_enabler->parent.event_param));

	event_notifier_enabler->parent.enabled = 0;
	event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
	event_notifier_enabler->group = event_notifier_group;
	return event_notifier_enabler;
}
void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	mutex_lock(&sessions_mutex);
	list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
	event_notifier_enabler->parent.published = true;
	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
	mutex_unlock(&sessions_mutex);
}

int lttng_event_notifier_enabler_enable(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
	mutex_unlock(&sessions_mutex);
	return 0;
}

int lttng_event_notifier_enabler_disable(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
	mutex_unlock(&sessions_mutex);
	return 0;
}
int lttng_event_notifier_enabler_attach_capture_bytecode(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_abi_capture_bytecode __user *bytecode)
{
	struct lttng_kernel_bytecode_node *bytecode_node;
	struct lttng_event_enabler_common *enabler =
			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
	uint32_t bytecode_len;
	int ret;

	ret = get_user(bytecode_len, &bytecode->len);
	if (ret)
		return ret;

	bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
			GFP_KERNEL);
	if (!bytecode_node)
		return -ENOMEM;

	ret = copy_from_user(&bytecode_node->bc, bytecode,
		sizeof(*bytecode) + bytecode_len);
	if (ret)
		goto error_free;

	bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
	bytecode_node->enabler = enabler;

	/* Enforce length based on allocated size */
	bytecode_node->bc.len = bytecode_len;
	list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);

	event_notifier_enabler->num_captures++;

	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
	goto end;

error_free:
	lttng_kvfree(bytecode_node);
end:
	return ret;
}
static
void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
{
	int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
	struct lttng_kernel_bytecode_runtime *runtime;
	struct lttng_enabler_ref *enabler_ref;

	/* Check if has enablers without bytecode enabled */
	list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
		if (enabler_ref->ref->enabled
				&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
			has_enablers_without_filter_bytecode = 1;
			break;
		}
	}
	event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;

	/* Enable filters */
	list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
		lttng_bytecode_sync_state(runtime);
		nr_filters++;
	}
	WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
}
static
void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
{
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
		break;
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
	{
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);
		struct lttng_kernel_bytecode_runtime *runtime;
		int nr_captures = 0;

		/* Enable captures */
		list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
			lttng_bytecode_sync_state(runtime);
			nr_captures++;
		}
		WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
}
static
bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
{
	struct lttng_enabler_ref *enabler_ref;
	bool enabled = false;

	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_SYSCALL:
		/* Enable events */
		list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = true;
				break;
			}
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
	{
		struct lttng_kernel_event_recorder *event_recorder =
			container_of(event, struct lttng_kernel_event_recorder, parent);

		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		return enabled && event_recorder->chan->parent.session->priv->tstate
			&& event_recorder->chan->priv->parent.tstate;
	}
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
		return enabled;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
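/*
 * Informal summary of the rule implemented above, as a boolean expression
 * (uses the same fields as the code; only the shorthand names are ours):
 *
 *	recorder_enabled = (at least one enabler_ref->ref->enabled)
 *			   && session->priv->tstate
 *			   && chan->priv->parent.tstate;
 *
 * i.e. the union of enablers intersected with the session and channel
 * transient states, while notifiers only depend on the enabler union.
 */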
static
bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
{
	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_SYSCALL:
		return true;

	default:
		/* Not handled with lazy sync. */
		return false;
	}
}
/*
 * Should be called with sessions mutex held.
 */
static
void lttng_sync_event_list(struct list_head *event_enabler_list,
		struct list_head *event_list)
{
	struct lttng_kernel_event_common_private *event_priv;
	struct lttng_event_enabler_common *event_enabler;

	list_for_each_entry(event_enabler, event_enabler_list, node)
		lttng_event_enabler_ref_events(event_enabler);

	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event_priv, event_list, node) {
		struct lttng_kernel_event_common *event = event_priv->pub;
		bool enabled;

		if (!lttng_event_is_lazy_sync(event))
			continue;

		enabled = lttng_get_event_enabled_state(event);
		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled state.
		 */
		if (enabled) {
			if (!event_priv->registered)
				register_event(event);
		} else {
			if (event_priv->registered)
				unregister_event(event);
		}

		lttng_event_sync_filter_state(event);
		lttng_event_sync_capture_state(event);
	}
}
/*
 * lttng_session_sync_event_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
{
	lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
}

/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 * Should be called with sessions mutex held.
 */
static
void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_event_enablers(session);
}

static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
}

static
void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
{
	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
	{
		struct lttng_event_recorder_enabler *event_recorder_enabler =
			container_of(event_enabler, struct lttng_event_recorder_enabler, parent);

		lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
		break;
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);

		lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
}
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel.
 * We grab the metadata cache mutex to get exclusive access to our metadata
 * buffer and to the metadata cache. Exclusive access to the metadata buffer
 * allows us to do racy operations such as looking for remaining space left in
 * packet and write, since mutual exclusion protects us from concurrent writes.
 * Mutual exclusion on the metadata cache allow us to read the cache content
 * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
{
	struct lttng_kernel_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support mutiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	reserve_len = min_t(size_t,
			stream->transport->ops.priv->packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
			sizeof(char), NULL);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len, 1);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
void lttng_metadata_begin(struct lttng_kernel_session *session)
{
	if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
		mutex_lock(&session->priv->metadata_cache->lock);
}

void lttng_metadata_end(struct lttng_kernel_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
	if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->priv->metadata_cache->lock);
	}
}
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held.
 * The metadata cache lock protects us from concurrent read access from
 * thread outputting metadata content to ring buffer.
 * The content of the printf is printed as a single atomic metadata
 * transaction.
 */
int lttng_metadata_printf(struct lttng_kernel_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
	if (session->priv->metadata_cache->metadata_written + len >
			session->priv->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		tmp_cache_alloc_size = max_t(unsigned int,
				session->priv->metadata_cache->cache_alloc + len,
				session->priv->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->priv->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->priv->metadata_cache->data,
				session->priv->metadata_cache->cache_alloc);
			vfree(session->priv->metadata_cache->data);
		}

		session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->priv->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->priv->metadata_cache->data +
			session->priv->metadata_cache->metadata_written,
			str, len);
	session->priv->metadata_cache->metadata_written += len;
	kfree(str);
	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
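/*
 * Illustrative sketch, not compiled: the statedump helpers below all follow
 * this begin/printf/end pattern (on an active session) so that everything
 * emitted between begin and end reaches readers of the metadata cache as a
 * single atomic transaction. The function name is hypothetical.
 */
#if 0
static int example_metadata_transaction_sketch(struct lttng_kernel_session *session)
{
	int ret;

	lttng_metadata_begin(session);
	ret = lttng_metadata_printf(session, "one atomic metadata block\n");
	lttng_metadata_end(session);
	return ret;
}
#endif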
static
int print_tabs(struct lttng_kernel_session *session, size_t nesting)
{
	size_t i;

	for (i = 0; i < nesting; i++) {
		int ret;

		ret = lttng_metadata_printf(session, "\t");
		if (ret)
			return ret;
	}
	return 0;
}

static
int lttng_field_name_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
static
int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_integer *type,
		enum lttng_kernel_string_encoding parent_encoding,
		size_t nesting)
{
	int ret;

	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->size,
		type->alignment,
		type->signedness,
		(parent_encoding == lttng_kernel_string_encoding_none)
			? "none"
			: (parent_encoding == lttng_kernel_string_encoding_UTF8)
				? "UTF8"
				: "ASCII",
		type->base,
#if __BYTE_ORDER == __BIG_ENDIAN
		type->reverse_byte_order ? " byte_order = le;" : ""
#else
		type->reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_struct *type,
		size_t nesting)
{
	const char *prev_field_name = NULL;
	int ret;
	uint32_t i, nr_fields;
	unsigned int alignment;

	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"struct {\n");
	if (ret)
		return ret;
	nr_fields = type->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_kernel_event_field *iter_field;

		iter_field = type->fields[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	alignment = type->alignment;
	if (alignment) {
		ret = lttng_metadata_printf(session,
			"} align(%u)",
			alignment);
	} else {
		ret = lttng_metadata_printf(session,
			"}");
	}
	return ret;
}

/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	int ret;

	ret = _lttng_struct_type_statedump(session,
			lttng_kernel_get_type_struct(field->type), nesting);
	if (ret)
		return ret;
	return lttng_field_name_statedump(session, field, nesting);
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_variant *type,
		size_t nesting,
		const char *prev_field_name)
{
	const char *tag_name;
	int ret;
	uint32_t i, nr_choices;

	tag_name = type->tag_name;
	if (!tag_name) {
		tag_name = prev_field_name;
		if (!tag_name)
			return -EINVAL;
	}
	/*
	 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
	 */
	if (type->alignment != 0)
		return -EINVAL;
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"variant <_%s> {\n",
		tag_name);
	if (ret)
		return ret;
	nr_choices = type->nr_choices;
	for (i = 0; i < nr_choices; i++) {
		const struct lttng_kernel_event_field *iter_field;

		iter_field = type->choices[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
			"}");
	return ret;
}

/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting,
		const char *prev_field_name)
{
	int ret;

	ret = _lttng_variant_type_statedump(session,
			lttng_kernel_get_type_variant(field->type), nesting,
			prev_field_name);
	if (ret)
		return ret;
	return lttng_field_name_statedump(session, field, nesting);
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_array_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_kernel_type_array *array_type;
	const struct lttng_kernel_type_common *elem_type;

	array_type = lttng_kernel_get_type_array(field->type);
	WARN_ON_ONCE(!array_type);

	if (array_type->alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
			"struct { } align(%u) _%s_padding;\n",
			array_type->alignment * CHAR_BIT,
			field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = array_type->elem_type;
	switch (elem_type->type) {
	case lttng_kernel_type_integer:
	case lttng_kernel_type_struct:
	case lttng_kernel_type_variant:
		ret = _lttng_type_statedump(session, elem_type,
				array_type->encoding, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[%u];\n",
		field->name,
		array_type->length);
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting,
		const char *prev_field_name)
{
	int ret;
	const char *length_name;
	const struct lttng_kernel_type_sequence *sequence_type;
	const struct lttng_kernel_type_common *elem_type;

	sequence_type = lttng_kernel_get_type_sequence(field->type);
	WARN_ON_ONCE(!sequence_type);

	length_name = sequence_type->length_name;
	if (!length_name) {
		length_name = prev_field_name;
		if (!length_name)
			return -EINVAL;
	}

	if (sequence_type->alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
			"struct { } align(%u) _%s_padding;\n",
			sequence_type->alignment * CHAR_BIT,
			field->name);
		if (ret)
			return ret;
	}

	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = sequence_type->elem_type;
	switch (elem_type->type) {
	case lttng_kernel_type_integer:
	case lttng_kernel_type_struct:
	case lttng_kernel_type_variant:
		ret = _lttng_type_statedump(session, elem_type,
				sequence_type->encoding, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}

	ret = lttng_metadata_printf(session,
		" _%s[ _%s ];\n",
		field->name,
		length_name);
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_enum *type,
		size_t nesting)
{
	const struct lttng_kernel_enum_desc *enum_desc;
	const struct lttng_kernel_type_common *container_type;
	int ret;
	unsigned int i, nr_entries;

	container_type = type->container_type;
	if (container_type->type != lttng_kernel_type_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
			lttng_kernel_string_encoding_none, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session, ",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_kernel_type_enum *enum_type;

	enum_type = lttng_kernel_get_type_enum(field->type);
	WARN_ON_ONCE(!enum_type);
	ret = _lttng_enum_type_statedump(session, enum_type, nesting);
	if (ret)
		return ret;
	return lttng_field_name_statedump(session, field, nesting);
}

static
int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	int ret;

	ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
			lttng_kernel_string_encoding_none, nesting);
	if (ret)
		return ret;
	return lttng_field_name_statedump(session, field, nesting);
}
static
int _lttng_string_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_string *type,
		size_t nesting)
{
	int ret;

	/* Default encoding is UTF8 */
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"string%s",
		type->encoding == lttng_kernel_string_encoding_ASCII ?
			" { encoding = ASCII; }" : "");
	return ret;
}

static
int _lttng_string_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting)
{
	const struct lttng_kernel_type_string *string_type;
	int ret;

	string_type = lttng_kernel_get_type_string(field->type);
	WARN_ON_ONCE(!string_type);
	ret = _lttng_string_type_statedump(session, string_type, nesting);
	if (ret)
		return ret;
	return lttng_field_name_statedump(session, field, nesting);
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_type_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_type_common *type,
		enum lttng_kernel_string_encoding parent_encoding,
		size_t nesting)
{
	int ret = 0;

	switch (type->type) {
	case lttng_kernel_type_integer:
		ret = _lttng_integer_type_statedump(session,
				lttng_kernel_get_type_integer(type),
				parent_encoding, nesting);
		break;
	case lttng_kernel_type_enum:
		ret = _lttng_enum_type_statedump(session,
				lttng_kernel_get_type_enum(type),
				nesting);
		break;
	case lttng_kernel_type_string:
		ret = _lttng_string_type_statedump(session,
				lttng_kernel_get_type_string(type),
				nesting);
		break;
	case lttng_kernel_type_struct:
		ret = _lttng_struct_type_statedump(session,
				lttng_kernel_get_type_struct(type),
				nesting);
		break;
	case lttng_kernel_type_variant:
		ret = _lttng_variant_type_statedump(session,
				lttng_kernel_get_type_variant(type),
				nesting, NULL);
		break;

	/* Nested arrays and sequences are not supported yet. */
	case lttng_kernel_type_array:
	case lttng_kernel_type_sequence:
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_field_statedump(struct lttng_kernel_session *session,
		const struct lttng_kernel_event_field *field,
		size_t nesting,
		const char **prev_field_name_p)
{
	const char *prev_field_name = NULL;
	int ret = 0;

	if (prev_field_name_p)
		prev_field_name = *prev_field_name_p;
	switch (field->type->type) {
	case lttng_kernel_type_integer:
		ret = _lttng_integer_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_enum:
		ret = _lttng_enum_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_string:
		ret = _lttng_string_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_struct:
		ret = _lttng_struct_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_array:
		ret = _lttng_array_field_statedump(session, field, nesting);
		break;
	case lttng_kernel_type_sequence:
		ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
		break;
	case lttng_kernel_type_variant:
		ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
		break;

	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	if (prev_field_name_p)
		*prev_field_name_p = field->name;
	return ret;
}
static
int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
				  struct lttng_kernel_ctx *ctx)
{
	const char *prev_field_name = NULL;
	int ret = 0;
	int i;

	if (!ctx)
		return 0;
	for (i = 0; i < ctx->nr_fields; i++) {
		const struct lttng_kernel_ctx_field *field = &ctx->fields[i];

		ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
		if (ret)
			return ret;
	}
	return ret;
}

static
int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
				struct lttng_kernel_event_recorder *event_recorder)
{
	const char *prev_field_name = NULL;
	const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
	int ret = 0;
	int i;

	for (i = 0; i < desc->tp_class->nr_fields; i++) {
		const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];

		ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
		if (ret)
			return ret;
	}
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 */
static
int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event)
{
	struct lttng_kernel_event_recorder *event_recorder;
	struct lttng_kernel_channel_buffer *chan;
	struct lttng_kernel_session *session;
	int ret = 0;

	if (event->type != LTTNG_KERNEL_EVENT_TYPE_RECORDER)
		return 0;
	event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
	chan = event_recorder->chan;
	session = chan->parent.session;

	if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	if (chan->priv->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event_recorder->priv->parent.desc->event_name,
		event_recorder->priv->id,
		event_recorder->chan->priv->id);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event_recorder);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	event_recorder->priv->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 * The entire channel metadata is printed as a single atomic metadata
 * transaction.
 */
static
int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
		struct lttng_kernel_channel_buffer *chan)
{
	int ret = 0;

	if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;

	if (chan->priv->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	WARN_ON_ONCE(!chan->priv->header_type);
	ret = lttng_metadata_printf(session,
		"stream {\n"
		"	id = %u;\n"
		"	event.header := %s;\n"
		"	packet.context := struct packet_context;\n",
		chan->priv->id,
		chan->priv->header_type == 1 ? "struct event_header_compact" :
			"struct event_header_large");
	if (ret)
		goto end;

	if (chan->priv->ctx) {
		ret = lttng_metadata_printf(session,
			"	event.context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
	if (ret)
		goto end;
	if (chan->priv->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"};\n\n");

	chan->priv->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
/*
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
/*
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_kernel_session *session)
{
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
/*
 * Approximation of NTP time of day to clock monotonic correlation,
 * taken at start of trace.
 * Yes, this is only an approximation. Yes, we can (and will) do better
 * in future versions.
 * This function may return a negative offset. It may happen if the
 * system sets the REALTIME clock to 0 after boot.
 *
 * Use 64bit timespec on kernels that have it, this makes 32bit arch
 * y2038 compliant.
 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		realtime += rts.tv_nsec;
	} else {
		uint64_t n = rts.tv_nsec * tcf;

		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
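/*
 * Worked example of the computation above (numbers chosen for illustration),
 * assuming a 1 GHz trace clock so tcf == NSEC_PER_SEC: with
 * rts = 1700000000 s + 500000000 ns and monotonic_avg = 500000000 cycles,
 *
 *	realtime = 1700000000 * 1000000000 + 500000000
 *		 = 1700000000500000000
 *	offset   = realtime - monotonic_avg = 1700000000000000000
 *
 * which is the value later emitted in the clock description of the metadata.
 */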
static
int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
{
	int ret = 0;
	size_t i;
	char cur;

	i = 0;
	cur = string[i];
	while (cur != '\0') {
		switch (cur) {
		case '\n':
			ret = lttng_metadata_printf(session, "%s", "\\n");
			break;
		case '\\':
		case '"':
			ret = lttng_metadata_printf(session, "%c", '\\');
			if (ret)
				goto error;
			/* We still print the current char */
			lttng_fallthrough;
		default:
			ret = lttng_metadata_printf(session, "%c", cur);
			break;
		}

		if (ret)
			goto error;

		cur = string[++i];
	}
error:
	return ret;
}
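/*
 * Example: the trace name my "fast" trace is emitted by the helper above as
 * my \"fast\" trace, and an embedded newline is emitted as the two characters
 * backslash and 'n', keeping the generated metadata parseable.
 */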
static
int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		goto error;

	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		goto error;

	ret = lttng_metadata_printf(session, "\";\n");

error:
	return ret;
}
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
{
	unsigned char *uuid_c = session->priv->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_kernel_channel_buffer_private *chan_priv;
	struct lttng_kernel_event_recorder_private *event_recorder_priv;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	lttng_metadata_begin(session);

	if (session->priv->metadata_dumped)
		goto skip_session;

	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->priv->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	list_for_each_entry(chan_priv, &session->priv->chan, node) {
		ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
		if (ret)
			goto end;
	}

	list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
		ret = _lttng_event_recorder_metadata_statedump(&event_recorder_priv->pub->parent);
		if (ret)
			goto end;
	}
	session->priv->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
/**
 * lttng_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);

/**
 * lttng_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
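/*
 * Illustrative usage sketch, not compiled and with hypothetical names: a ring
 * buffer client module typically fills a struct lttng_transport and registers
 * it from its module init, relying on the registration itself to perform the
 * vmalloc_sync_mappings() discussed above.
 */
#if 0
static struct lttng_transport lttng_relay_transport_example = {
	.name = "relay-example",	/* hypothetical client name */
	.owner = THIS_MODULE,
	/* .ops filled with the client's ring buffer operations */
};

static int __init lttng_ring_buffer_client_example_init(void)
{
	lttng_transport_register(&lttng_relay_transport_example);
	return 0;
}
#endif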
void lttng_counter_transport_register(struct lttng_counter_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_counter_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_register);

void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))

enum cpuhp_state lttng_hp_prepare;
enum cpuhp_state lttng_hp_online;

static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return 0;
	case LTTNG_RING_BUFFER_BACKEND:
		return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
	case LTTNG_RING_BUFFER_ITER:
		return 0;
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return 0;
	default:
		return -EINVAL;
	}
}

static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
	case LTTNG_RING_BUFFER_BACKEND:
		return 0;
	case LTTNG_RING_BUFFER_ITER:
		return 0;
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
	default:
		return -EINVAL;
	}
}

static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
	case LTTNG_RING_BUFFER_BACKEND:
		return 0;
	case LTTNG_RING_BUFFER_ITER:
		return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
	default:
		return -EINVAL;
	}
}

static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_FRONTEND:
		return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
	case LTTNG_RING_BUFFER_BACKEND:
		return 0;
	case LTTNG_RING_BUFFER_ITER:
		return 0;
	case LTTNG_CONTEXT_PERF_COUNTERS:
		return 0;
	default:
		return -EINVAL;
	}
}

static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0)
		return ret;
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}

static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
	if (!event_recorder_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_recorder;
	}
	event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
	if (!event_recorder_private_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_recorder_private;
	}
	event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
	if (!event_notifier_private_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier_private;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
	printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
#endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
	return 0;

error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_private_cache);
error_kmem_event_notifier_private:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_recorder_private_cache);
error_kmem_event_recorder_private:
	kmem_cache_destroy(event_recorder_cache);
error_kmem_event_recorder:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}

module_init(lttng_events_init);
static void __exit lttng_events_exit(void)
{
	struct lttng_kernel_session_private *session_priv, *tmpsession_priv;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
		lttng_session_destroy(session_priv->pub);
	kmem_cache_destroy(event_recorder_cache);
	kmem_cache_destroy(event_recorder_private_cache);
	kmem_cache_destroy(event_notifier_cache);
	kmem_cache_destroy(event_notifier_private_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}

module_exit(lttng_events_exit);
#include <generated/patches.h>
#ifdef LTTNG_EXTRA_VERSION_GIT
MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
#endif
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng tracer");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);