/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/irqdesc.h>
#include <wrapper/fdtable.h>
#include <wrapper/namespace.h>
#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
#include <wrapper/genhd.h>
#include <wrapper/file.h>

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-module/lttng-statedump.h>

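/*
 * Define each statedump tracepoint used by this module. The prototypes
 * below match the event declarations in
 * instrumentation/events/lttng-module/lttng-statedump.h.
 */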
LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
	TP_PROTO(struct lttng_session *session,
		dev_t dev, const char *diskname),
	TP_ARGS(session, dev, diskname));

LTTNG_DEFINE_TRACE(lttng_statedump_end,
	TP_PROTO(struct lttng_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
	TP_PROTO(struct lttng_session *session,
		unsigned int irq, const char *chip_name,
		struct irqaction *action),
	TP_ARGS(session, irq, chip_name, action));

LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
	TP_PROTO(struct lttng_session *session,
		struct files_struct *files,
		int fd, const char *filename,
		unsigned int flags, fmode_t fmode),
	TP_ARGS(session, files, fd, filename, flags, fmode));

LTTNG_DEFINE_TRACE(lttng_statedump_start,
	TP_PROTO(struct lttng_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		int type, int mode, int submode, int status,
		struct files_struct *files),
	TP_ARGS(session, p, type, mode, submode, status, files));

LTTNG_DEFINE_TRACE(lttng_statedump_process_pid_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct pid_namespace *pid_ns),
	TP_ARGS(session, p, pid_ns));

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
LTTNG_DEFINE_TRACE(lttng_statedump_process_cgroup_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct cgroup_namespace *cgroup_ns),
	TP_ARGS(session, p, cgroup_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_ipc_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct ipc_namespace *ipc_ns),
	TP_ARGS(session, p, ipc_ns));

#ifndef LTTNG_MNT_NS_MISSING_HEADER
LTTNG_DEFINE_TRACE(lttng_statedump_process_mnt_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct mnt_namespace *mnt_ns),
	TP_ARGS(session, p, mnt_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_net_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct net *net_ns),
	TP_ARGS(session, p, net_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_user_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct user_namespace *user_ns),
	TP_ARGS(session, p, user_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_uts_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct uts_namespace *uts_ns),
	TP_ARGS(session, p, uts_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
	TP_PROTO(struct lttng_session *session,
		struct net_device *dev, struct in_ifaddr *ifa),
	TP_ARGS(session, dev, ifa));

#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
LTTNG_DEFINE_TRACE(lttng_statedump_cpu_topology,
	TP_PROTO(struct lttng_session *session, struct cpuinfo_x86 *c),
	TP_ARGS(session, c));
#endif

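/*
 * Context passed to the file descriptor iteration callback: the scratch
 * page used by d_path(), the target session and the files_struct being
 * enumerated.
 */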
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

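/*
 * Walk all registered disks and their partitions, and emit one
 * lttng_statedump_block_device event per partition (device number and
 * disk name). Returns -ENOSYS when the block class or disk device type
 * cannot be resolved through the wrappers.
 */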
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		return -ENOSYS;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			struct block_device bdev;
			char name_buf[BDEVNAME_SIZE];
			const char *p;

			/*
			 * Create a partial 'struct block_device' to use
			 * 'bdevname()', which is a simple wrapper over
			 * 'disk_name()' but has the honor to be
			 * EXPORT_SYMBOL.
			 */
			bdev.bd_disk = disk;
			bdev.bd_part = part;

			p = bdevname(&bdev, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

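/*
 * Network interface enumeration (only when CONFIG_INET is set): emit one
 * lttng_statedump_network_interface event per IPv4 address of each
 * interface that is up, and one event with a NULL address for interfaces
 * that are down.
 */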
#ifdef CONFIG_INET

static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

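/*
 * Callback invoked by lttng_iterate_fd() for each open file descriptor.
 * It resolves the path with d_path() (falling back to the dentry name on
 * failure), reconstructs the userspace-visible flags (including O_CLOEXEC
 * from the fdtable) and emits a lttng_statedump_file_descriptor event.
 */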
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}

/* Called with task lock held. */
static
void lttng_enumerate_files(struct lttng_session *session,
		struct files_struct *files,
		char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };

	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}

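/*
 * CPU topology enumeration: when available (x86, as suggested by the
 * cpuinfo_x86 tracepoint prototype above), emit one
 * lttng_statedump_cpu_topology event per possible CPU; otherwise this is
 * a no-op.
 */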
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	return 0;
}
#endif

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

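/*
 * Interrupt enumeration (only when CONFIG_LTTNG_HAS_LIST_IRQ is set):
 * walk every irq_desc and emit one lttng_statedump_interrupt event per
 * registered irqaction, holding the descriptor lock with interrupts
 * disabled while iterating the action list.
 */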
#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
#else
static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}
#endif

/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);

	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	proxy = p->nsproxy;
#else
	rcu_read_lock();
	proxy = task_nsproxy(p);
#endif
	if (proxy) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* (nothing) */
#else
	rcu_read_unlock();
#endif
}

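/*
 * Walk every thread of every process and emit its state
 * (lttng_statedump_process_state), its namespaces and, once per distinct
 * files_struct, its open file descriptors. The whole iteration runs under
 * rcu_read_lock() with the task lock held for each thread.
 */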
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}

static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

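/*
 * Perform the complete state dump for a session: processes, interrupts,
 * network interfaces, block devices and CPU topology, bracketed by the
 * lttng_statedump_start and lttng_statedump_end events. It then schedules
 * a delayed work item on every online CPU and waits for all of them, so
 * that each CPU has gone through a state where it was not in a trap, IRQ
 * or softirq.
 */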
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);