3e122d84 |
1 | /* ltt-state-dump.c |
2 | * |
 * Linux Trace Toolkit Kernel State Dump
4 | * |
5 | * Copyright 2005 - |
6 | * Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca> |
7 | * |
8 | * Changes: |
9 | * Eric Clement: add listing of network IP interface |
10 | */ |
11 | |
a387f1f9 |
12 | #include <linux/config.h> |
3e122d84 |
13 | #include <linux/init.h> |
14 | #include <linux/module.h> |
15 | #include <linux/ltt-core.h> |
16 | #include <linux/netlink.h> |
17 | #include <linux/inet.h> |
18 | #include <linux/ip.h> |
19 | #include <linux/kthread.h> |
20 | #include <linux/proc_fs.h> |
21 | #include <linux/file.h> |
22 | #include <linux/interrupt.h> |
23 | #include <linux/irq.h> |
a387f1f9 |
24 | #include <linux/cpu.h> |
3e122d84 |
25 | #include <linux/ltt/ltt-facility-statedump.h> |
26 | |
27 | #define NB_PROC_CHUNK 20 |
28 | |
29 | #include <linux/netdevice.h> |
30 | #include <linux/inetdevice.h> |
31 | |
32 | /* in modules.c */ |
33 | extern int ltt_enumerate_modules(void); |
34 | |
35 | static inline int ltt_enumerate_network_ip_interface(void) |
36 | { |
37 | struct net_device *list; |
38 | struct in_device *in_dev = NULL; |
39 | struct in_ifaddr *ifa = NULL; |
40 | |
41 | read_lock(&dev_base_lock); |
42 | for(list=dev_base; list != NULL; list=list->next) { |
43 | |
44 | if(list->flags & IFF_UP) { |
45 | in_dev = in_dev_get(list); |
46 | |
47 | if(in_dev) { |
48 | for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) { |
49 | trace_statedump_enumerate_network_ip_interface(list->name, |
50 | ifa->ifa_address, |
51 | LTTNG_UP); |
52 | } |
53 | in_dev_put(in_dev); |
54 | } |
55 | } else { |
56 | trace_statedump_enumerate_network_ip_interface(list->name, |
57 | 0, |
58 | LTTNG_DOWN); |
59 | } |
60 | } |
61 | read_unlock(&dev_base_lock); |
62 | |
63 | return 0; |
64 | } |
65 | |
66 | static inline int ltt_enumerate_file_descriptors(void) |
67 | { |
68 | struct task_struct * t = &init_task; |
69 | unsigned int i; |
70 | struct file * filp; |
71 | char *tmp = (char*)__get_free_page(GFP_KERNEL), *path; |
72 | struct fdtable *fdt; |
73 | |
74 | /* Enumerate active file descriptors */ |
75 | |
76 | do { |
77 | read_lock(&tasklist_lock); |
78 | if(t != &init_task) atomic_dec(&t->usage); |
79 | t = next_task(t); |
80 | atomic_inc(&t->usage); |
81 | read_unlock(&tasklist_lock); |
82 | |
83 | task_lock(t); |
84 | if (t->files) { |
85 | spin_lock(&t->files->file_lock); |
86 | fdt = files_fdtable(t->files); |
87 | for (i=0; i < fdt->max_fds; i++) { |
88 | filp = fcheck_files(t->files, i); |
89 | if (!filp) |
90 | continue; |
91 | path = d_path(filp->f_dentry, filp->f_vfsmnt, tmp, PAGE_SIZE); |
92 | |
93 | /* Make sure we give at least some info */ |
94 | if(IS_ERR(path)) |
95 | trace_statedump_enumerate_file_descriptors(filp->f_dentry->d_name.name, t->pid, i); |
96 | else |
97 | trace_statedump_enumerate_file_descriptors(path, t->pid, i); |
98 | } |
99 | spin_unlock(&t->files->file_lock); |
100 | } |
101 | task_unlock(t); |
102 | |
103 | } while( t != &init_task ); |
104 | |
105 | free_page((unsigned long)tmp); |
106 | |
107 | return 0; |
108 | } |
109 | |
110 | static inline int ltt_enumerate_vm_maps(void) |
111 | { |
112 | struct mm_struct *mm; |
113 | struct task_struct * t = &init_task; |
114 | struct vm_area_struct * map; |
115 | unsigned long ino = 0; |
116 | |
117 | do { |
118 | read_lock(&tasklist_lock); |
119 | if(t != &init_task) atomic_dec(&t->usage); |
120 | t = next_task(t); |
121 | atomic_inc(&t->usage); |
122 | read_unlock(&tasklist_lock); |
123 | |
124 | /* get_task_mm does a task_lock... */ |
125 | |
126 | mm = get_task_mm(t); |
127 | |
128 | if (mm) |
129 | { |
130 | map = mm->mmap; |
131 | |
132 | if(map) |
133 | { |
134 | down_read(&mm->mmap_sem); |
135 | |
136 | while (map) { |
137 | if (map->vm_file) { |
138 | ino = map->vm_file->f_dentry->d_inode->i_ino; |
139 | } else { |
140 | ino = 0; |
141 | } |
142 | |
143 | trace_statedump_enumerate_vm_maps(t->pid, (void *)map->vm_start, (void *)map->vm_end, map->vm_flags, map->vm_pgoff << PAGE_SHIFT, ino); |
144 | map = map->vm_next; |
145 | } |
146 | |
147 | up_read(&mm->mmap_sem); |
148 | } |
149 | |
150 | mmput(mm); |
151 | } |
152 | |
153 | } while( t != &init_task ); |
154 | |
155 | return 0; |
156 | } |
157 | |
158 | #if defined( CONFIG_ARM ) |
159 | /* defined in arch/arm/kernel/irq.c because of dependency on statically-defined lock & irq_desc */ |
160 | int ltt_enumerate_interrupts(void); |
161 | #else |
162 | static inline int ltt_enumerate_interrupts(void) |
163 | { |
164 | unsigned int i; |
165 | unsigned long flags = 0; |
166 | |
167 | /* needs irq_desc */ |
168 | |
169 | for(i = 0; i < NR_IRQS; i++) |
170 | { |
171 | struct irqaction * action; |
172 | |
173 | spin_lock_irqsave(&irq_desc[i].lock, flags); |
174 | |
175 | |
176 | for (action=irq_desc[i].action; action; action = action->next) |
177 | trace_statedump_enumerate_interrupts( |
178 | irq_desc[i].handler->typename, |
179 | action->name, |
180 | i ); |
181 | |
182 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
183 | } |
184 | |
185 | return 0; |
186 | } |
187 | #endif |
188 | |
/*
 * Walk every thread of every process and emit a statedump event
 * describing its scheduling status, thread type (user vs. kernel)
 * and execution mode/submode.
 *
 * Iteration scheme: next_thread() walks the threads of the current
 * process; when the walk wraps back to the group leader (t == p),
 * next_task() advances to the next process. The loop terminates once
 * it comes back around to init_task.
 *
 * Returns 0.
 */
static inline int ltt_enumerate_process_states(void)
{
	struct task_struct * t = &init_task;
	struct task_struct * p = t;
	enum lttng_process_status status;
	enum lttng_thread_type type;
	enum lttng_execution_mode mode;
	enum lttng_execution_submode submode;

	do {
		/* Mode/submode cannot be sampled from here; report unknown. */
		mode = LTTNG_MODE_UNKNOWN;
		submode = LTTNG_UNKNOWN;

		/*
		 * Advance to the next thread (or next process when the
		 * thread walk wraps) under tasklist_lock, pinning the new
		 * task with a usage reference before dropping the lock.
		 */
		read_lock(&tasklist_lock);
		if(t != &init_task) {
			atomic_dec(&t->usage);
			t = next_thread(t);
		}
		if(t == p) {
			t = p = next_task(t);
		}
		atomic_inc(&t->usage);
		read_unlock(&tasklist_lock);

		task_lock(t);

		/* Map exit_state/state onto the LTTng status enumeration. */
		if(t->exit_state == EXIT_ZOMBIE)
			status = LTTNG_ZOMBIE;
		else if(t->exit_state == EXIT_DEAD)
			status = LTTNG_DEAD;
		else if(t->state == TASK_RUNNING)
		{
			/* Is this a forked child that has not run yet? */
			if( list_empty(&t->run_list) )
			{
				status = LTTNG_WAIT_FORK;
			}
			else
			{
				/* All tasks are considered as wait_cpu;
				   the viewer will sort out if the task was
				   really running at this time. */
				status = LTTNG_WAIT_CPU;
			}
		}
		else if(t->state & (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE))
		{
			/* Task is waiting for something to complete */
			status = LTTNG_WAIT;
		}
		else
			status = LTTNG_UNNAMED;

		submode = LTTNG_NONE;


		/* Verification of t->mm is to filter out kernel threads;
		   Viewer will further filter out if a user-space thread was
		   in syscall mode or not */
		if(t->mm)
			type = LTTNG_USER_THREAD;
		else
			type = LTTNG_KERNEL_THREAD;

		trace_statedump_enumerate_process_state(t->pid, t->parent->pid, t->comm,
			type, mode, submode, status);

		task_unlock(t);

	} while( t != &init_task );

	return 0;
}
259 | |
/*
 * Work queue callback. Its only job is to release the semaphore, which
 * proves that the CPU it ran on passed through syscall mode before the
 * end of ltt_statedump_thread.
 */
void ltt_statedump_work_func(void *sem)
{
	struct semaphore *done = sem;

	up(done);
}
267 | |
a387f1f9 |
/* One work item per possible CPU, used below to force every online
 * CPU through a work-queue (syscall-mode) context. */
static struct work_struct cpu_work[NR_CPUS];

/*
 * Main state dump thread: enumerates processes, file descriptors,
 * modules, memory maps, interrupts and network interfaces, then waits
 * until every online CPU has run a work item (proving each CPU left
 * any trap/IRQ context) before emitting the statedump_end event.
 *
 * Runs as a kthread started by ltt_statedump_start(); terminates via
 * do_exit(0), so the trailing return is never reached.
 */
int ltt_statedump_thread(void *data)
{
	struct semaphore work_sema4;
	int cpu;

	printk("ltt_statedump_thread\n");

	ltt_enumerate_process_states();

	ltt_enumerate_file_descriptors();

	ltt_enumerate_modules();

	ltt_enumerate_vm_maps();

	ltt_enumerate_interrupts();

	ltt_enumerate_network_ip_interface();

	/* Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 * Initial count is 1 - num_online_cpus(): after every CPU's up()
	 * the count reaches 1, letting the down() below proceed. */
	sema_init(&work_sema4, 1 - num_online_cpus());

	/* Keep the online-CPU set stable while queuing the work items. */
	lock_cpu_hotplug();
	for_each_online_cpu(cpu)
	{
		INIT_WORK(&cpu_work[cpu], ltt_statedump_work_func, &work_sema4);

		schedule_delayed_work_on(cpu,&cpu_work[cpu],0);
	}
	unlock_cpu_hotplug();

	/* Wait for all work queues to have completed */
	down(&work_sema4);

	/* Our work is done */
	printk("trace_statedump_statedump_end\n");
	trace_statedump_statedump_end();

	do_exit(0);

	return 0;
}
314 | |
315 | int ltt_statedump_start(struct ltt_trace_struct *trace) |
316 | { |
317 | printk("ltt_statedump_start\n"); |
318 | |
319 | kthread_run( ltt_statedump_thread, |
320 | NULL, |
321 | "ltt_statedump"); |
322 | |
323 | return 0; |
324 | } |
325 | |
326 | |
327 | /* Dynamic facility. */ |
328 | |
329 | static int __init statedump_init(void) |
330 | { |
331 | int ret; |
332 | printk(KERN_INFO "LTT : ltt-facility-statedump init\n"); |
333 | |
334 | ret = ltt_module_register(LTT_FUNCTION_STATEDUMP, |
335 | ltt_statedump_start,THIS_MODULE); |
336 | |
337 | return ret; |
338 | } |
339 | |
/* Module exit: unregister the statedump function from the LTT core. */
static void __exit statedump_exit(void)
{
	ltt_module_unregister(LTT_FUNCTION_STATEDUMP);
}
344 | |
345 | module_init(statedump_init) |
346 | module_exit(statedump_exit) |
347 | |
348 | |
349 | MODULE_LICENSE("GPL"); |
350 | MODULE_AUTHOR("Jean-Hugues Deschenes"); |
351 | MODULE_DESCRIPTION("Linux Trace Toolkit Statedump"); |
352 | |