/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/irqdesc.h>
#include <wrapper/spinlock.h>
#include <wrapper/fdtable.h>
#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
#include <wrapper/genhd.h>
#include <wrapper/file.h>

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-module/lttng-statedump.h>

DEFINE_TRACE(lttng_statedump_block_device);
DEFINE_TRACE(lttng_statedump_end);
DEFINE_TRACE(lttng_statedump_interrupt);
DEFINE_TRACE(lttng_statedump_file_descriptor);
DEFINE_TRACE(lttng_statedump_start);
DEFINE_TRACE(lttng_statedump_process_state);
DEFINE_TRACE(lttng_statedump_network_interface);

struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct task_struct *p;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};
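
/*
 * Walk the block class devices and emit one lttng_statedump_block_device
 * event per partition (partition 0 describes the whole disk). Returns
 * -ENOSYS when the kernel facilities needed for the walk are not reachable
 * through the wrapper layer.
 */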
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		return -ENOSYS;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed.
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			char name_buf[BDEVNAME_SIZE];
			char *p;

			p = wrapper_disk_name(disk, part->partno, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}
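
/*
 * Network interface enumeration (IPv4 only): one
 * lttng_statedump_network_interface event is emitted per address configured
 * on an interface that is up, and a single event with a NULL address for an
 * interface that is down.
 */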
#ifdef CONFIG_INET

static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}

#else /* CONFIG_INET */

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}

#endif /* CONFIG_INET */
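
/*
 * Callback passed to lttng_iterate_fd(): resolves the path of one open file
 * descriptor (falling back to the dentry name when d_path() fails) and emits
 * a lttng_statedump_file_descriptor event for it.
 */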
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
			dentry->d_name.name, flags, file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s,
		flags, file->f_mode);
end:
	return 0;
}
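
/* Dump every file descriptor held open by a single task. */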
static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
	struct files_struct *files;

	task_lock(p);
	files = p->files;
	if (!files)
		goto end;
	ctx.files = files;
	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
end:
	task_unlock(p);
}
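
/*
 * Dump the file descriptor table of every process, using a scratch page as
 * temporary storage for d_path().
 */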
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
#endif
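
/*
 * Emit one lttng_statedump_interrupt event per registered irqaction, with
 * the irq descriptor lock held and local interrupts disabled.
 */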
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		wrapper_desc_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		wrapper_desc_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}

#else /* CONFIG_LTTNG_HAS_LIST_IRQ */

static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}

#endif /* CONFIG_LTTNG_HAS_LIST_IRQ */
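
/*
 * Emit one lttng_statedump_process_state event per pid namespace level,
 * walking from the task's active pid namespace up to the root.
 */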
/*
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct pid_namespace *pid_ns;

	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_state(session,
			p, type, mode, submode, status, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);
}
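
/*
 * Record the scheduling state, execution mode and thread type of every
 * thread of every process at statedump time.
 */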
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
					(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
	return 0;
}
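
/*
 * Per-CPU delayed work handler: decrements the count of pending CPUs and
 * wakes up the waiter in do_lttng_statedump() once the last one has run.
 */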
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}
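
/*
 * Run the complete state dump for a session: processes, file descriptors,
 * interrupts, network interfaces and block devices, then synchronize with
 * every online CPU through the per-CPU delayed work queue.
 */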
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_file_descriptors(session);
	if (ret)
		return ret;
	/*
	 * ret = lttng_enumerate_vm_maps(session);
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));

	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);
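
/*
 * Minimal usage sketch (assuming a valid struct lttng_session obtained from
 * the LTTng session management code; the error handling shown is illustrative):
 *
 *	if (lttng_statedump_start(session))
 *		printk(KERN_WARNING "LTTng: statedump failed\n");
 */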
static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);