Update ring buffer and pretty print
[lttng-modules.git] / deprecated / probes / kernel-trace.c
1 /*
2 * ltt/probes/kernel-trace.c
3 *
4 * kernel tracepoint probes.
5 *
6 * (C) Copyright 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10 #include <linux/module.h>
11 #include <linux/irq.h>
12 #include <trace/events/signal.h>
13 #include <trace/irq.h>
14 #include <trace/sched.h>
15 #include <trace/timer.h>
16 #include <trace/kernel.h>
17 #include <trace/fault.h>
18 #include <trace/events/sched.h>
19
20 #include "../ltt-tracer.h"
21 #include "../ltt-type-serializer.h"
22
23 /*
24 * This should probably be added to s390.
25 */
#ifdef CONFIG_S390
/*
 * s390 lacks get_irq_regs(); approximate it with the current task's saved
 * register frame.  NOTE(review): assumes the probe fires in task context on
 * s390 -- confirm before forward-porting.
 */
static struct pt_regs *get_irq_regs(void)
{
	return task_pt_regs(current);
}
#endif
32
33 /*
34 * FIXME :
35 * currently, the specialized tracepoint probes cannot call into other marker
36 * probes, such as ftrace enable/disable. Given we want them to be as fast as
37 * possible, it might not be so bad to lose this flexibility. But that means
38 * such probes would have to connect to tracepoints on their own.
39 */
40
41 /* kernel_irq_entry specialized tracepoint probe */
42
43 void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
44 struct irqaction *action);
45
46 DEFINE_MARKER_TP(kernel, irq_entry, irq_entry, probe_irq_entry,
47 "ip %lu handler %p irq_id #2u%u kernel_mode #1u%u");
48
49 notrace void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
50 struct irqaction *action)
51 {
52 struct marker *marker;
53 struct serialize_long_long_short_char data;
54
55 if (unlikely(!regs))
56 regs = get_irq_regs();
57 if (likely(regs)) {
58 data.f1 = instruction_pointer(regs);
59 data.f4 = !user_mode(regs);
60 } else {
61 data.f1 = 0UL;
62 data.f4 = 1;
63 }
64 data.f2 = (unsigned long) (action ? action->handler : NULL);
65 data.f3 = id;
66
67 marker = &GET_MARKER(kernel, irq_entry);
68 ltt_specialized_trace(marker, marker->single.probe_private,
69 &data, serialize_sizeof(data), sizeof(long));
70 }
71
72 void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action,
73 irqreturn_t prev_ret);
74
75 DEFINE_MARKER_TP(kernel, irq_next_handler, irq_next_handler,
76 probe_irq_next_handler,
77 "handler %p prev_ret #1u%u");
78
79 notrace void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action,
80 irqreturn_t prev_ret)
81 {
82 struct marker *marker;
83 struct serialize_long_char data;
84
85 data.f1 = (unsigned long) (action ? action->handler : NULL);
86 data.f2 = prev_ret;
87
88 marker = &GET_MARKER(kernel, irq_next_handler);
89 ltt_specialized_trace(marker, marker->single.probe_private,
90 &data, serialize_sizeof(data), sizeof(long));
91 }
92
93 /* kernel_irq_exit specialized tracepoint probe */
94
95 void probe_irq_exit(void *_data, irqreturn_t retval);
96
97 DEFINE_MARKER_TP(kernel, irq_exit, irq_exit, probe_irq_exit,
98 "handled #1u%u");
99
100 notrace void probe_irq_exit(void *_data, irqreturn_t retval)
101 {
102 struct marker *marker;
103 unsigned char data;
104
105 data = IRQ_RETVAL(retval);
106
107 marker = &GET_MARKER(kernel, irq_exit);
108 ltt_specialized_trace(marker, marker->single.probe_private,
109 &data, sizeof(data), sizeof(data));
110 }
111
112 /* kernel_softirq_entry specialized tracepoint probe */
113
114 void probe_softirq_entry(void *_data, struct softirq_action *h,
115 struct softirq_action *softirq_vec);
116
117 DEFINE_MARKER_TP(kernel, softirq_entry, softirq_entry,
118 probe_softirq_entry, "softirq_id #1u%lu");
119
120 notrace void probe_softirq_entry(void *_data, struct softirq_action *h,
121 struct softirq_action *softirq_vec)
122 {
123 struct marker *marker;
124 unsigned char data;
125
126 data = ((unsigned long)h - (unsigned long)softirq_vec) / sizeof(*h);
127
128 marker = &GET_MARKER(kernel, softirq_entry);
129 ltt_specialized_trace(marker, marker->single.probe_private,
130 &data, sizeof(data), sizeof(data));
131 }
132
133 /* kernel_softirq_exit specialized tracepoint probe */
134
135 void probe_softirq_exit(void *_data, struct softirq_action *h,
136 struct softirq_action *softirq_vec);
137
138 DEFINE_MARKER_TP(kernel, softirq_exit, softirq_exit,
139 probe_softirq_exit, "softirq_id #1u%lu");
140
141 notrace void probe_softirq_exit(void *_data, struct softirq_action *h,
142 struct softirq_action *softirq_vec)
143 {
144 struct marker *marker;
145 unsigned char data;
146
147 data = ((unsigned long)h - (unsigned long)softirq_vec) / sizeof(*h);
148
149 marker = &GET_MARKER(kernel, softirq_exit);
150 ltt_specialized_trace(marker, marker->single.probe_private,
151 &data, sizeof(data), sizeof(data));
152 }
153
154 /* kernel_softirq_raise specialized tracepoint probe */
155
156 void probe_softirq_raise(void *_data, unsigned int nr);
157
158 DEFINE_MARKER_TP(kernel, softirq_raise, softirq_raise,
159 probe_softirq_raise, "softirq_id #1u%u");
160
161 notrace void probe_softirq_raise(void *_data, unsigned int nr)
162 {
163 struct marker *marker;
164 unsigned char data;
165
166 data = nr;
167
168 marker = &GET_MARKER(kernel, softirq_raise);
169 ltt_specialized_trace(marker, marker->single.probe_private,
170 &data, sizeof(data), sizeof(data));
171 }
172
173 /* Standard probes */
/* Trace entry into a low-priority tasklet: callback pointer and its cookie. */
void probe_irq_tasklet_low_entry(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_low_entry, irq_tasklet_low_entry,
		probe_irq_tasklet_low_entry, "func %p data %lu",
		t->func, t->data);
}
180
/* Trace exit from a low-priority tasklet: callback pointer and its cookie. */
void probe_irq_tasklet_low_exit(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_low_exit, irq_tasklet_low_exit,
		probe_irq_tasklet_low_exit, "func %p data %lu",
		t->func, t->data);
}
187
/* Trace entry into a high-priority tasklet: callback pointer and its cookie. */
void probe_irq_tasklet_high_entry(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_high_entry, irq_tasklet_high_entry,
		probe_irq_tasklet_high_entry, "func %p data %lu",
		t->func, t->data);
}
194
/* Trace exit from a high-priority tasklet: callback pointer and its cookie. */
void probe_irq_tasklet_high_exit(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_high_exit, irq_tasklet_high_exit,
		probe_irq_tasklet_high_exit, "func %p data %lu",
		t->func, t->data);
}
201
/* Trace a request to stop a kthread, identified by its pid. */
void probe_sched_kthread_stop(void *_data, struct task_struct *t)
{
	trace_mark_tp(kernel, kthread_stop, sched_kthread_stop,
		probe_sched_kthread_stop, "pid %d", t->pid);
}
207
/* Trace the return value of a completed kthread_stop(). */
void probe_sched_kthread_stop_ret(void *_data, int ret)
{
	trace_mark_tp(kernel, kthread_stop_ret, sched_kthread_stop_ret,
		probe_sched_kthread_stop_ret, "ret %d", ret);
}
213
/* Trace a wait on a task: its pid and scheduler state (2-byte field). */
void probe_sched_wait_task(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, sched_wait_task, sched_wait_task,
		probe_sched_wait_task, "pid %d state #2d%ld",
		p->pid, p->state);
}
220
221 /* kernel_sched_try_wakeup specialized tracepoint probe */
222
223 void probe_sched_wakeup(void *_data, struct task_struct *p, int success);
224
225 DEFINE_MARKER_TP(kernel, sched_try_wakeup, sched_wakeup,
226 probe_sched_wakeup, "pid %d cpu_id %u state #2d%ld");
227
228 notrace void probe_sched_wakeup(void *_data, struct task_struct *p, int success)
229 {
230 struct marker *marker;
231 struct serialize_int_int_short data;
232
233 data.f1 = p->pid;
234 data.f2 = task_cpu(p);
235 data.f3 = p->state;
236
237 marker = &GET_MARKER(kernel, sched_try_wakeup);
238 ltt_specialized_trace(marker, marker->single.probe_private,
239 &data, serialize_sizeof(data), sizeof(int));
240 }
241
/* Trace the first wakeup of a newly forked task: pid, state and cpu. */
void probe_sched_wakeup_new(void *_data, struct task_struct *p, int success)
{
	trace_mark_tp(kernel, sched_wakeup_new_task, sched_wakeup_new,
		probe_sched_wakeup_new, "pid %d state #2d%ld cpu_id %u",
		p->pid, p->state, task_cpu(p));
}
248
249 /* kernel_sched_schedule specialized tracepoint probe */
250
251 void probe_sched_switch(void *_data, struct task_struct *prev,
252 struct task_struct *next);
253
254 DEFINE_MARKER_TP(kernel, sched_schedule, sched_switch, probe_sched_switch,
255 "prev_pid %d next_pid %d prev_state #2d%ld");
256
257 notrace void probe_sched_switch(void *_data, struct task_struct *prev,
258 struct task_struct *next)
259 {
260 struct marker *marker;
261 struct serialize_int_int_short data;
262
263 data.f1 = prev->pid;
264 data.f2 = next->pid;
265 data.f3 = prev->state;
266
267 marker = &GET_MARKER(kernel, sched_schedule);
268 ltt_specialized_trace(marker, marker->single.probe_private,
269 &data, serialize_sizeof(data), sizeof(int));
270 }
271
/* Trace a task migration: pid, state, and the destination cpu. */
void probe_sched_migrate_task(void *_data, struct task_struct *p, int dest_cpu)
{
	trace_mark_tp(kernel, sched_migrate_task, sched_migrate_task,
		probe_sched_migrate_task, "pid %d state #2d%ld dest_cpu %d",
		p->pid, p->state, dest_cpu);
}
278
/*
 * Trace signal generation: target pid and signal number.  The siginfo
 * payload is not serialized.
 */
void probe_sched_signal_send(void *_data, int sig, struct siginfo *info, struct task_struct *t)
{
	trace_mark_tp(kernel, send_signal, signal_generate,
		probe_sched_signal_send, "pid %d signal %d", t->pid, sig);
}
284
/* Trace the release of a task structure, identified by pid. */
void probe_sched_process_free(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, process_free, sched_process_free,
		probe_sched_process_free, "pid %d", p->pid);
}
290
/* Trace process exit, identified by pid. */
void probe_sched_process_exit(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, process_exit, sched_process_exit,
		probe_sched_process_exit, "pid %d", p->pid);
}
296
/* Trace a wait on a child process; pid_nr() flattens the struct pid. */
void probe_sched_process_wait(void *_data, struct pid *pid)
{
	trace_mark_tp(kernel, process_wait, sched_process_wait,
		probe_sched_process_wait, "pid %d", pid_nr(pid));
}
302
/* Trace a fork: parent pid, child pid and child thread-group id. */
void probe_sched_process_fork(void *_data, struct task_struct *parent,
		struct task_struct *child)
{
	trace_mark_tp(kernel, process_fork, sched_process_fork,
		probe_sched_process_fork,
		"parent_pid %d child_pid %d child_tgid %d",
		parent->pid, child->pid, child->tgid);
}
311
/* Trace kthread creation: the thread function and the new thread's pid. */
void probe_sched_kthread_create(void *_data, void *fn, int pid)
{
	trace_mark_tp(kernel, kthread_create, sched_kthread_create,
		probe_sched_kthread_create,
		"fn %p pid %d", fn, pid);
}
318
/* Trace itimer expiry, attributed to the signal group leader's pid. */
void probe_timer_itimer_expired(void *_data, struct signal_struct *sig)
{
	trace_mark_tp(kernel, timer_itimer_expired, timer_itimer_expired,
		probe_timer_itimer_expired, "pid %d",
		pid_nr(sig->leader_pid));
}
325
/*
 * Trace setitimer(): which timer (ITIMER_*), plus interval and initial
 * value split into seconds/microseconds.
 */
void probe_timer_itimer_set(void *_data, int which, struct itimerval *value)
{
	trace_mark_tp(kernel, timer_itimer_set,
		timer_itimer_set, probe_timer_itimer_set,
		"which %d interval_sec %ld interval_usec %ld "
		"value_sec %ld value_usec %ld",
		which,
		value->it_interval.tv_sec,
		value->it_interval.tv_usec,
		value->it_value.tv_sec,
		value->it_value.tv_usec);
}
338
339 /* kernel_timer_set specialized tracepoint probe */
340
341 void probe_timer_set(void *_data, struct timer_list *timer);
342
343 DEFINE_MARKER_TP(kernel, timer_set, timer_set, probe_timer_set,
344 "expires %lu function %p data %lu");
345
346 notrace void probe_timer_set(void *_data, struct timer_list *timer)
347 {
348 struct marker *marker;
349 struct serialize_long_long_long data;
350
351 data.f1 = timer->expires;
352 data.f2 = (unsigned long)timer->function;
353 data.f3 = timer->data;
354
355 marker = &GET_MARKER(kernel, timer_set);
356 ltt_specialized_trace(marker, marker->single.probe_private,
357 &data, serialize_sizeof(data), sizeof(long));
358 }
359
/* Trace a wall-clock update: jiffies_64, xtime and wall_to_monotonic. */
void probe_timer_update_time(void *_data, struct timespec *_xtime,
		struct timespec *_wall_to_monotonic)
{
	trace_mark_tp(kernel, timer_update_time, timer_update_time,
		probe_timer_update_time,
		"jiffies #8u%llu xtime_sec %ld xtime_nsec %ld "
		"walltomonotonic_sec %ld walltomonotonic_nsec %ld",
		(unsigned long long)jiffies_64, _xtime->tv_sec, _xtime->tv_nsec,
		_wall_to_monotonic->tv_sec, _wall_to_monotonic->tv_nsec);
}
370
/* Trace a process timer timeout, identified by the woken task's pid. */
void probe_timer_timeout(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, timer_timeout, timer_timeout,
		probe_timer_timeout, "pid %d", p->pid);
}
376
/* Trace a printk call site by its return address. */
void probe_kernel_printk(void *_data, unsigned long retaddr)
{
	trace_mark_tp(kernel, printk, kernel_printk,
		probe_kernel_printk, "ip 0x%lX", retaddr);
}
382
383 void probe_kernel_vprintk(void *_data, unsigned long retaddr, char *buf, int len)
384 {
385 if (len > 0) {
386 unsigned int loglevel;
387 int mark_len;
388 char *mark_buf;
389 char saved_char;
390
391 if (buf[0] == '<' && buf[1] >= '0' &&
392 buf[1] <= '7' && buf[2] == '>') {
393 loglevel = buf[1] - '0';
394 mark_buf = &buf[3];
395 mark_len = len - 3;
396 } else {
397 loglevel = default_message_loglevel;
398 mark_buf = buf;
399 mark_len = len;
400 }
401 if (mark_buf[mark_len - 1] == '\n')
402 mark_len--;
403 saved_char = mark_buf[mark_len];
404 mark_buf[mark_len] = '\0';
405 trace_mark_tp(kernel, vprintk, kernel_vprintk,
406 probe_kernel_vprintk,
407 "loglevel #1u%u string %s ip 0x%lX",
408 loglevel, mark_buf, retaddr);
409 mark_buf[mark_len] = saved_char;
410 }
411 }
412
413 #ifdef CONFIG_MODULES
/* Trace module unload by module name. */
void probe_kernel_module_free(void *_data, struct module *mod)
{
	trace_mark_tp(kernel, module_free, kernel_module_free,
		probe_kernel_module_free, "name %s", mod->name);
}
419
/* Trace module load by module name. */
void probe_kernel_module_load(void *_data, struct module *mod)
{
	trace_mark_tp(kernel, module_load, kernel_module_load,
		probe_kernel_module_load, "name %s", mod->name);
}
425 #endif
426
/*
 * Trace a kernel panic with its formatted message, truncated to 63 chars.
 * NOTE(review): vsnprintf() consumes @args; the caller must not reuse the
 * va_list afterwards without va_copy -- confirm at the tracepoint site.
 */
void probe_kernel_panic(void *_data, const char *fmt, va_list args)
{
	char info[64];
	vsnprintf(info, sizeof(info), fmt, args);
	trace_mark_tp(kernel, panic, kernel_panic, probe_kernel_panic,
		"info %s", info);
}
434
/* Trace a planned kexec by the kimage pointer. */
void probe_kernel_kernel_kexec(void *_data, struct kimage *image)
{
	trace_mark_tp(kernel, kernel_kexec, kernel_kernel_kexec,
		probe_kernel_kernel_kexec, "image %p", image);
}
440
/* Trace a crash kexec: kimage pointer and crashing ip (NULL if no regs). */
void probe_kernel_crash_kexec(void *_data, struct kimage *image, struct pt_regs *regs)
{
	trace_mark_tp(kernel, crash_kexec, kernel_crash_kexec,
		probe_kernel_crash_kexec, "image %p ip %p", image,
		regs ? (void *)instruction_pointer(regs) : NULL);
}
447
448 /* kernel_page_fault_entry specialized tracepoint probe */
449
450 void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr,
451 struct mm_struct *mm, struct vm_area_struct *vma,
452 unsigned long address, int write_access);
453
454 DEFINE_MARKER_TP(kernel, page_fault_entry, page_fault_entry,
455 probe_kernel_page_fault_entry,
456 "ip #p%lu address #p%lu trap_id #2u%u write_access #1u%u");
457
458 notrace void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr,
459 struct mm_struct *mm, struct vm_area_struct *vma,
460 unsigned long address, int write_access)
461 {
462 struct marker *marker;
463 struct serialize_long_long_short_char data;
464
465 if (likely(regs))
466 data.f1 = instruction_pointer(regs);
467 else
468 data.f1 = 0UL;
469 data.f2 = address;
470 data.f3 = (unsigned short)trapnr;
471 data.f4 = (unsigned char)!!write_access;
472
473 marker = &GET_MARKER(kernel, page_fault_entry);
474 ltt_specialized_trace(marker, marker->single.probe_private,
475 &data, serialize_sizeof(data), sizeof(long));
476 }
477
478 /* kernel_page_fault_exit specialized tracepoint probe */
479
480 void probe_kernel_page_fault_exit(void *_data, int res);
481
482 DEFINE_MARKER_TP(kernel, page_fault_exit, page_fault_exit,
483 probe_kernel_page_fault_exit,
484 "res %d");
485
486 notrace void probe_kernel_page_fault_exit(void *_data, int res)
487 {
488 struct marker *marker;
489
490 marker = &GET_MARKER(kernel, page_fault_exit);
491 ltt_specialized_trace(marker, marker->single.probe_private,
492 &res, sizeof(res), sizeof(res));
493 }
494
495 /* kernel_page_fault_nosem_entry specialized tracepoint probe */
496
497 void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs,
498 int trapnr, unsigned long address);
499
500 DEFINE_MARKER_TP(kernel, page_fault_nosem_entry, page_fault_nosem_entry,
501 probe_kernel_page_fault_nosem_entry,
502 "ip #p%lu address #p%lu trap_id #2u%u");
503
504 notrace void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs,
505 int trapnr, unsigned long address)
506 {
507 struct marker *marker;
508 struct serialize_long_long_short data;
509
510 if (likely(regs))
511 data.f1 = instruction_pointer(regs);
512 else
513 data.f1 = 0UL;
514 data.f2 = address;
515 data.f3 = (unsigned short)trapnr;
516
517 marker = &GET_MARKER(kernel, page_fault_nosem_entry);
518 ltt_specialized_trace(marker, marker->single.probe_private,
519 &data, serialize_sizeof(data), sizeof(long));
520 }
521
522 /* kernel_page_fault_nosem_exit specialized tracepoint probe */
523
524 void probe_kernel_page_fault_nosem_exit(void *_data, int res);
525
526 DEFINE_MARKER_TP(kernel, page_fault_nosem_exit, page_fault_nosem_exit,
527 probe_kernel_page_fault_nosem_exit,
528 MARK_NOARGS);
529
530 notrace void probe_kernel_page_fault_nosem_exit(void *_data, int res)
531 {
532 struct marker *marker;
533
534 marker = &GET_MARKER(kernel, page_fault_nosem_exit);
535 ltt_specialized_trace(marker, marker->single.probe_private,
536 NULL, 0, 0);
537 }
538
539 /* kernel_page_fault_get_user_entry specialized tracepoint probe */
540
541 void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm,
542 struct vm_area_struct *vma, unsigned long address, int write_access);
543
544 DEFINE_MARKER_TP(kernel, page_fault_get_user_entry, page_fault_get_user_entry,
545 probe_kernel_page_fault_get_user_entry,
546 "address #p%lu write_access #1u%u");
547
548 notrace void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm,
549 struct vm_area_struct *vma, unsigned long address, int write_access)
550 {
551 struct marker *marker;
552 struct serialize_long_char data;
553
554 data.f1 = address;
555 data.f2 = (unsigned char)!!write_access;
556
557 marker = &GET_MARKER(kernel, page_fault_get_user_entry);
558 ltt_specialized_trace(marker, marker->single.probe_private,
559 &data, serialize_sizeof(data), sizeof(long));
560 }
561
562 /* kernel_page_fault_get_user_exit specialized tracepoint probe */
563
564 void probe_kernel_page_fault_get_user_exit(void *_data, int res);
565
566 DEFINE_MARKER_TP(kernel, page_fault_get_user_exit, page_fault_get_user_exit,
567 probe_kernel_page_fault_get_user_exit,
568 "res %d");
569
570 notrace void probe_kernel_page_fault_get_user_exit(void *_data, int res)
571 {
572 struct marker *marker;
573
574 marker = &GET_MARKER(kernel, page_fault_get_user_exit);
575 ltt_specialized_trace(marker, marker->single.probe_private,
576 &res, sizeof(res), sizeof(res));
577 }
578
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("kernel Tracepoint Probes");
This page took 0.044075 seconds and 4 git commands to generate.