3e122d84 |
1 | /* ltt-state-dump.c |
2 | * |
 * Linux Trace Toolkit Kernel State Dump
4 | * |
5 | * Copyright 2005 - |
6 | * Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca> |
7 | * |
8 | * Changes: |
9 | * Eric Clement: add listing of network IP interface |
10 | */ |
11 | |
12 | #include <linux/init.h> |
13 | #include <linux/module.h> |
14 | #include <linux/ltt-core.h> |
15 | #include <linux/netlink.h> |
16 | #include <linux/inet.h> |
17 | #include <linux/ip.h> |
18 | #include <linux/kthread.h> |
19 | #include <linux/proc_fs.h> |
20 | #include <linux/file.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/irq.h> |
23 | #include <linux/ltt/ltt-facility-statedump.h> |
24 | |
25 | #define NB_PROC_CHUNK 20 |
26 | |
27 | #include <linux/netdevice.h> |
28 | #include <linux/inetdevice.h> |
29 | |
30 | /* in modules.c */ |
31 | extern int ltt_enumerate_modules(void); |
32 | |
33 | static inline int ltt_enumerate_network_ip_interface(void) |
34 | { |
35 | struct net_device *list; |
36 | struct in_device *in_dev = NULL; |
37 | struct in_ifaddr *ifa = NULL; |
38 | |
39 | read_lock(&dev_base_lock); |
40 | for(list=dev_base; list != NULL; list=list->next) { |
41 | |
42 | if(list->flags & IFF_UP) { |
43 | in_dev = in_dev_get(list); |
44 | |
45 | if(in_dev) { |
46 | for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) { |
47 | trace_statedump_enumerate_network_ip_interface(list->name, |
48 | ifa->ifa_address, |
49 | LTTNG_UP); |
50 | } |
51 | in_dev_put(in_dev); |
52 | } |
53 | } else { |
54 | trace_statedump_enumerate_network_ip_interface(list->name, |
55 | 0, |
56 | LTTNG_DOWN); |
57 | } |
58 | } |
59 | read_unlock(&dev_base_lock); |
60 | |
61 | return 0; |
62 | } |
63 | |
64 | static inline int ltt_enumerate_file_descriptors(void) |
65 | { |
66 | struct task_struct * t = &init_task; |
67 | unsigned int i; |
68 | struct file * filp; |
69 | char *tmp = (char*)__get_free_page(GFP_KERNEL), *path; |
70 | struct fdtable *fdt; |
71 | |
72 | /* Enumerate active file descriptors */ |
73 | |
74 | do { |
75 | read_lock(&tasklist_lock); |
76 | if(t != &init_task) atomic_dec(&t->usage); |
77 | t = next_task(t); |
78 | atomic_inc(&t->usage); |
79 | read_unlock(&tasklist_lock); |
80 | |
81 | task_lock(t); |
82 | if (t->files) { |
83 | spin_lock(&t->files->file_lock); |
84 | fdt = files_fdtable(t->files); |
85 | for (i=0; i < fdt->max_fds; i++) { |
86 | filp = fcheck_files(t->files, i); |
87 | if (!filp) |
88 | continue; |
89 | path = d_path(filp->f_dentry, filp->f_vfsmnt, tmp, PAGE_SIZE); |
90 | |
91 | /* Make sure we give at least some info */ |
92 | if(IS_ERR(path)) |
93 | trace_statedump_enumerate_file_descriptors(filp->f_dentry->d_name.name, t->pid, i); |
94 | else |
95 | trace_statedump_enumerate_file_descriptors(path, t->pid, i); |
96 | } |
97 | spin_unlock(&t->files->file_lock); |
98 | } |
99 | task_unlock(t); |
100 | |
101 | } while( t != &init_task ); |
102 | |
103 | free_page((unsigned long)tmp); |
104 | |
105 | return 0; |
106 | } |
107 | |
108 | static inline int ltt_enumerate_vm_maps(void) |
109 | { |
110 | struct mm_struct *mm; |
111 | struct task_struct * t = &init_task; |
112 | struct vm_area_struct * map; |
113 | unsigned long ino = 0; |
114 | |
115 | do { |
116 | read_lock(&tasklist_lock); |
117 | if(t != &init_task) atomic_dec(&t->usage); |
118 | t = next_task(t); |
119 | atomic_inc(&t->usage); |
120 | read_unlock(&tasklist_lock); |
121 | |
122 | /* get_task_mm does a task_lock... */ |
123 | |
124 | mm = get_task_mm(t); |
125 | |
126 | if (mm) |
127 | { |
128 | map = mm->mmap; |
129 | |
130 | if(map) |
131 | { |
132 | down_read(&mm->mmap_sem); |
133 | |
134 | while (map) { |
135 | if (map->vm_file) { |
136 | ino = map->vm_file->f_dentry->d_inode->i_ino; |
137 | } else { |
138 | ino = 0; |
139 | } |
140 | |
141 | trace_statedump_enumerate_vm_maps(t->pid, (void *)map->vm_start, (void *)map->vm_end, map->vm_flags, map->vm_pgoff << PAGE_SHIFT, ino); |
142 | map = map->vm_next; |
143 | } |
144 | |
145 | up_read(&mm->mmap_sem); |
146 | } |
147 | |
148 | mmput(mm); |
149 | } |
150 | |
151 | } while( t != &init_task ); |
152 | |
153 | return 0; |
154 | } |
155 | |
156 | #if defined( CONFIG_ARM ) |
157 | /* defined in arch/arm/kernel/irq.c because of dependency on statically-defined lock & irq_desc */ |
158 | int ltt_enumerate_interrupts(void); |
159 | #else |
160 | static inline int ltt_enumerate_interrupts(void) |
161 | { |
162 | unsigned int i; |
163 | unsigned long flags = 0; |
164 | |
165 | /* needs irq_desc */ |
166 | |
167 | for(i = 0; i < NR_IRQS; i++) |
168 | { |
169 | struct irqaction * action; |
170 | |
171 | spin_lock_irqsave(&irq_desc[i].lock, flags); |
172 | |
173 | |
174 | for (action=irq_desc[i].action; action; action = action->next) |
175 | trace_statedump_enumerate_interrupts( |
176 | irq_desc[i].handler->typename, |
177 | action->name, |
178 | i ); |
179 | |
180 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
181 | } |
182 | |
183 | return 0; |
184 | } |
185 | #endif |
186 | |
/*
 * Emit one statedump event per thread on the system, recording pid,
 * parent pid, command name, thread type (user vs kernel), execution
 * mode/submode and scheduling status.
 *
 * Traversal: next_thread() walks the threads of the current process;
 * when the thread list wraps back to the group leader (t == p),
 * next_task() advances to the next process.  A reference is held on
 * the current thread (t->usage) across the tasklist_lock drop so it
 * cannot be freed mid-walk.
 *
 * Returns 0 unconditionally.
 */
static inline int ltt_enumerate_process_states(void)
{
	struct task_struct * t = &init_task;
	struct task_struct * p = t;
	enum lttng_process_status status;
	enum lttng_thread_type type;
	enum lttng_execution_mode mode;
	enum lttng_execution_submode submode;

	do {
		/* Execution mode cannot be sampled from another task's
		 * context; report it as unknown. */
		mode = LTTNG_MODE_UNKNOWN;
		submode = LTTNG_UNKNOWN;

		read_lock(&tasklist_lock);
		if(t != &init_task) {
			atomic_dec(&t->usage);
			t = next_thread(t);
		}
		if(t == p) {
			/* Thread list wrapped around: move on to the
			 * next process in the task list. */
			t = p = next_task(t);
		}
		atomic_inc(&t->usage);
		read_unlock(&tasklist_lock);

		task_lock(t);

		if(t->exit_state == EXIT_ZOMBIE)
			status = LTTNG_ZOMBIE;
		else if(t->exit_state == EXIT_DEAD)
			status = LTTNG_DEAD;
		else if(t->state == TASK_RUNNING)
		{
			/* Is this a forked child that has not run yet? */
			if( list_empty(&t->run_list) )
			{
				status = LTTNG_WAIT_FORK;
			}
			else
			{
				/* All tasks are considered as wait_cpu; the viewer will sort out if the task was really running at this time. */
				status = LTTNG_WAIT_CPU;
			}
		}
		else if(t->state & (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE))
		{
			/* Task is waiting for something to complete */
			status = LTTNG_WAIT;
		}
		else
			status = LTTNG_UNNAMED;

		submode = LTTNG_NONE;


		/* Verification of t->mm is to filter out kernel threads;
		   Viewer will further filter out if a user-space thread was in syscall mode or not */
		if(t->mm)
			type = LTTNG_USER_THREAD;
		else
			type = LTTNG_KERNEL_THREAD;

		trace_statedump_enumerate_process_state(t->pid, t->parent->pid, t->comm,
				type, mode, submode, status);

		task_unlock(t);

	} while( t != &init_task );

	return 0;
}
257 | |
/*
 * Work-queue callback scheduled once per online CPU: raising the
 * semaphore proves the CPU it ran on has passed through syscall
 * context (i.e. was not stuck in a trap/IRQ/softirq) before
 * ltt_statedump_thread finishes.
 */
void ltt_statedump_work_func(void *sem)
{
	struct semaphore *done = (struct semaphore *)sem;

	up(done);
}
265 | |
266 | int ltt_statedump_thread(void *data) |
267 | { |
268 | struct work_struct *cpu_work; |
269 | struct semaphore work_sema4; |
270 | int cpu, cpu_index=0; |
271 | |
272 | printk("ltt_statedump_thread\n"); |
273 | |
274 | /* Start by firing off a work queue on each CPU. Their sole purpose in life |
275 | * is to guarantee that each CPU has been in a state where is was in syscall |
276 | * mode (i.e. not in a trap, an IRQ or a soft IRQ) */ |
277 | sema_init(&work_sema4, 1 - num_online_cpus()); |
278 | cpu_work = (struct work_struct *)kmalloc(sizeof(struct work_struct) * |
279 | num_online_cpus(), GFP_KERNEL); |
280 | for_each_online_cpu(cpu) |
281 | { |
282 | INIT_WORK(&cpu_work[cpu_index], ltt_statedump_work_func, &work_sema4); |
283 | |
284 | /* TODO: verify RC */ |
285 | schedule_delayed_work_on(cpu,&cpu_work[cpu_index],0); |
286 | cpu_index++; |
287 | } |
288 | |
289 | ltt_enumerate_process_states(); |
290 | |
291 | ltt_enumerate_file_descriptors(); |
292 | |
293 | ltt_enumerate_modules(); |
294 | |
295 | ltt_enumerate_vm_maps(); |
296 | |
297 | ltt_enumerate_interrupts(); |
298 | |
299 | ltt_enumerate_network_ip_interface(); |
300 | |
301 | /* Wait for all work queues to have completed */ |
302 | down(&work_sema4); |
303 | kfree(cpu_work); |
304 | |
305 | /* Our work is done */ |
306 | printk("trace_statedump_statedump_end\n"); |
307 | trace_statedump_statedump_end(); |
308 | |
309 | do_exit(0); |
310 | |
311 | return 0; |
312 | } |
313 | |
314 | int ltt_statedump_start(struct ltt_trace_struct *trace) |
315 | { |
316 | printk("ltt_statedump_start\n"); |
317 | |
318 | kthread_run( ltt_statedump_thread, |
319 | NULL, |
320 | "ltt_statedump"); |
321 | |
322 | return 0; |
323 | } |
324 | |
325 | |
326 | /* Dynamic facility. */ |
327 | |
328 | static int __init statedump_init(void) |
329 | { |
330 | int ret; |
331 | printk(KERN_INFO "LTT : ltt-facility-statedump init\n"); |
332 | |
333 | ret = ltt_module_register(LTT_FUNCTION_STATEDUMP, |
334 | ltt_statedump_start,THIS_MODULE); |
335 | |
336 | return ret; |
337 | } |
338 | |
/*
 * Module unload: detach the statedump facility from the LTT core so
 * ltt_statedump_start can no longer be invoked.
 */
static void __exit statedump_exit(void)
{
	ltt_module_unregister(LTT_FUNCTION_STATEDUMP);
}
343 | |
344 | module_init(statedump_init) |
345 | module_exit(statedump_exit) |
346 | |
347 | |
348 | MODULE_LICENSE("GPL"); |
349 | MODULE_AUTHOR("Jean-Hugues Deschenes"); |
350 | MODULE_DESCRIPTION("Linux Trace Toolkit Statedump"); |
351 | |