/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement: Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers Fix kernel threads
 *	Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/irqdesc.h>
#include <wrapper/spinlock.h>
#include <wrapper/fdtable.h>
#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
#include <wrapper/genhd.h>
#include <wrapper/file.h>

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-module/lttng-statedump.h>

LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
	TP_PROTO(struct lttng_session *session,
		dev_t dev, const char *diskname),
	TP_ARGS(session, dev, diskname));

LTTNG_DEFINE_TRACE(lttng_statedump_end,
	TP_PROTO(struct lttng_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
	TP_PROTO(struct lttng_session *session,
		unsigned int irq, const char *chip_name,
		struct irqaction *action),
	TP_ARGS(session, irq, chip_name, action));

LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p, int fd, const char *filename,
		unsigned int flags, fmode_t fmode),
	TP_ARGS(session, p, fd, filename, flags, fmode));

LTTNG_DEFINE_TRACE(lttng_statedump_start,
	TP_PROTO(struct lttng_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		int type, int mode, int submode, int status,
		struct pid_namespace *pid_ns),
	TP_ARGS(session, p, type, mode, submode, status, pid_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
	TP_PROTO(struct lttng_session *session,
		struct net_device *dev, struct in_ifaddr *ifa),
	TP_ARGS(session, dev, ifa));

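/*
 * Context passed to the file descriptor dump callback: one page used as a
 * scratch buffer for d_path(), plus the session, task and files_struct
 * being enumerated.
 */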
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct task_struct *p;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

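/*
 * Kernels 5.11 and later represent partitions with struct block_device
 * instead of struct hd_struct, so partition name and device number lookups
 * go through these version-specific helpers.
 */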
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0))

#define LTTNG_PART_STRUCT_TYPE struct block_device

static
int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
{
	const char *p;

	p = bdevname(part, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}

#else

#define LTTNG_PART_STRUCT_TYPE struct hd_struct

static
int lttng_get_part_name(struct gendisk *disk, struct hd_struct *part, char *name_buf)
{
	const char *p;
	struct block_device bdev;

	/*
	 * Create a partial 'struct block_device' to use
	 * 'bdevname()', which is a simple wrapper over
	 * 'disk_name()' but has the honor to be EXPORT_SYMBOL.
	 */
	bdev.bd_disk = disk;
	bdev.bd_part = part;

	p = bdevname(&bdev, name_buf);
	if (!p)
		return -ENOSYS;

	return 0;
}

static
dev_t lttng_get_part_devt(struct hd_struct *part)
{
	return part_devt(part);
}
#endif

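/*
 * Walk every disk registered in the block class and emit one
 * lttng_statedump_block_device event per partition (including partition 0,
 * the whole disk). Returns -ENOSYS when the needed block layer symbols or
 * partition names cannot be resolved.
 */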
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		return -ENOSYS;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		LTTNG_PART_STRUCT_TYPE *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			char name_buf[BDEVNAME_SIZE];

			if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					lttng_get_part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

#ifdef CONFIG_INET

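/*
 * Emit one lttng_statedump_network_interface event per IPv4 address on each
 * interface that is up; interfaces that are down are reported once with a
 * NULL address.
 */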
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

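/*
 * Callback passed to lttng_iterate_fd(): resolves the file's path into the
 * pre-allocated page and emits a lttng_statedump_file_descriptor event,
 * falling back to the dentry name when d_path() fails.
 */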
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
			dentry->d_name.name, flags, file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s,
		flags, file->f_mode);
end:
	return 0;
}

static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
	struct files_struct *files;

	task_lock(p);
	files = p->files;
	if (!files)
		goto end;
	ctx.files = files;
	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
end:
	task_unlock(p);
}

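/*
 * Dump the file descriptor table of every process on the system, sharing a
 * single page as scratch buffer for path resolution.
 */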
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

#if 0
/*
 * FIXME: we cannot take the mmap_sem while in an RCU read-side critical
 * section (scheduling in atomic). Normally, the tasklist lock protects this
 * kind of iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
#endif

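/*
 * Walk every IRQ descriptor and emit a lttng_statedump_interrupt event for
 * each registered irqaction, with the descriptor lock held and local
 * interrupts disabled.
 */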
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		wrapper_desc_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		wrapper_desc_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
#else
static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}
#endif

/*
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct pid_namespace *pid_ns;

	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_state(session,
			p, type, mode, submode, status, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);
}

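/*
 * Iterate over every thread of every process and emit a process_state event
 * for each, mapping the task's exit_state and scheduler state onto the
 * lttng_process_status enum. Tasks without an mm are reported as kernel
 * threads.
 */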
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of p->mm is to filter out kernel
			 * threads; the viewer will further filter out whether
			 * a user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	return 0;
}

static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

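/*
 * Perform the actual state dump for a session: processes, file descriptors,
 * interrupts, network interfaces and block devices, bracketed by the
 * statedump_start and statedump_end events. Block device enumeration is
 * allowed to fail with -ENOSYS without aborting the dump.
 */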
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_file_descriptors(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);