2 #define TRACE_SYSTEM sched
4 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_SCHED_H
7 #include "../../../probes/lttng-tracepoint-event.h"
8 #include <linux/sched.h>
9 #include <linux/binfmts.h>
10 #include <linux/version.h>
11 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
12 #include <linux/sched/rt.h>
15 #ifndef _TRACE_SCHED_DEF_
16 #define _TRACE_SCHED_DEF_
18 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
20 static inline long __trace_sched_switch_state(struct task_struct
*p
)
22 long state
= p
->state
;
26 * For all intents and purposes a preempted task is a running task.
28 if (task_preempt_count(p
) & PREEMPT_ACTIVE
)
29 state
= TASK_RUNNING
| TASK_STATE_MAX
;
35 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
37 static inline long __trace_sched_switch_state(struct task_struct
*p
)
39 long state
= p
->state
;
43 * For all intents and purposes a preempted task is a running task.
45 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
46 state
= TASK_RUNNING
| TASK_STATE_MAX
;
52 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
54 static inline long __trace_sched_switch_state(struct task_struct
*p
)
56 long state
= p
->state
;
60 * For all intents and purposes a preempted task is a running task.
62 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
71 #endif /* _TRACE_SCHED_DEF_ */
74 * Tracepoint for calling kthread_stop, performed to end a kthread:
76 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop
,
78 TP_PROTO(struct task_struct
*t
),
83 __array_text( char, comm
, TASK_COMM_LEN
)
88 tp_memcpy(comm
, t
->comm
, TASK_COMM_LEN
)
89 tp_assign(tid
, t
->pid
)
92 TP_printk("comm=%s tid=%d", __entry
->comm
, __entry
->tid
)
96 * Tracepoint for the return value of the kthread stopping:
98 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret
,
112 TP_printk("ret=%d", __entry
->ret
)
116 * Tracepoint for waking up a task:
118 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
120 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
121 TP_PROTO(struct task_struct
*p
, int success
),
125 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
127 TP_ARGS(rq
, p
, success
),
131 __array_text( char, comm
, TASK_COMM_LEN
)
132 __field( pid_t
, tid
)
134 __field( int, success
)
135 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
136 __field( int, target_cpu
)
141 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
142 tp_assign(tid
, p
->pid
)
143 tp_assign(prio
, p
->prio
)
144 tp_assign(success
, success
)
145 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
146 tp_assign(target_cpu
, task_cpu(p
))
148 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
155 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
156 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
157 __entry
->comm
, __entry
->tid
, __entry
->prio
,
158 __entry
->success
, __entry
->target_cpu
)
160 TP_printk("comm=%s tid=%d prio=%d success=%d",
161 __entry
->comm
, __entry
->tid
, __entry
->prio
,
166 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
168 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
169 TP_PROTO(struct task_struct
*p
, int success
),
173 * Tracepoint for waking up a new task:
175 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
176 TP_PROTO(struct task_struct
*p
, int success
),
179 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
181 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
182 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
183 TP_ARGS(rq
, p
, success
))
186 * Tracepoint for waking up a new task:
188 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
189 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
190 TP_ARGS(rq
, p
, success
))
192 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
195 * Tracepoint for task switches, performed by the scheduler:
197 LTTNG_TRACEPOINT_EVENT(sched_switch
,
199 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
200 TP_PROTO(struct task_struct
*prev
,
201 struct task_struct
*next
),
204 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
205 TP_PROTO(struct rq
*rq
, struct task_struct
*prev
,
206 struct task_struct
*next
),
208 TP_ARGS(rq
, prev
, next
),
209 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
212 __array_text( char, prev_comm
, TASK_COMM_LEN
)
213 __field( pid_t
, prev_tid
)
214 __field( int, prev_prio
)
215 __field( long, prev_state
)
216 __array_text( char, next_comm
, TASK_COMM_LEN
)
217 __field( pid_t
, next_tid
)
218 __field( int, next_prio
)
222 tp_memcpy(next_comm
, next
->comm
, TASK_COMM_LEN
)
223 tp_assign(prev_tid
, prev
->pid
)
224 tp_assign(prev_prio
, prev
->prio
- MAX_RT_PRIO
)
225 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
226 tp_assign(prev_state
, __trace_sched_switch_state(prev
))
228 tp_assign(prev_state
, prev
->state
)
230 tp_memcpy(prev_comm
, prev
->comm
, TASK_COMM_LEN
)
231 tp_assign(next_tid
, next
->pid
)
232 tp_assign(next_prio
, next
->prio
- MAX_RT_PRIO
)
235 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
236 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
237 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
238 __entry
->prev_state
& (TASK_STATE_MAX
-1) ?
239 __print_flags(__entry
->prev_state
& (TASK_STATE_MAX
-1), "|",
240 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
241 { 16, "Z" }, { 32, "X" }, { 64, "x" },
243 __entry
->prev_state
& TASK_STATE_MAX
? "+" : "",
244 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
246 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
247 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
248 __entry
->prev_state
?
249 __print_flags(__entry
->prev_state
, "|",
250 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
251 { 16, "Z" }, { 32, "X" }, { 64, "x" },
253 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
258 * Tracepoint for a task being migrated:
260 LTTNG_TRACEPOINT_EVENT(sched_migrate_task
,
262 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
264 TP_ARGS(p
, dest_cpu
),
267 __array_text( char, comm
, TASK_COMM_LEN
)
268 __field( pid_t
, tid
)
270 __field( int, orig_cpu
)
271 __field( int, dest_cpu
)
275 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
276 tp_assign(tid
, p
->pid
)
277 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
278 tp_assign(orig_cpu
, task_cpu(p
))
279 tp_assign(dest_cpu
, dest_cpu
)
282 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
283 __entry
->comm
, __entry
->tid
, __entry
->prio
,
284 __entry
->orig_cpu
, __entry
->dest_cpu
)
287 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template
,
289 TP_PROTO(struct task_struct
*p
),
294 __array_text( char, comm
, TASK_COMM_LEN
)
295 __field( pid_t
, tid
)
300 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
301 tp_assign(tid
, p
->pid
)
302 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
305 TP_printk("comm=%s tid=%d prio=%d",
306 __entry
->comm
, __entry
->tid
, __entry
->prio
)
310 * Tracepoint for freeing a task:
312 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_free
,
313 TP_PROTO(struct task_struct
*p
),
318 * Tracepoint for a task exiting:
320 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_exit
,
321 TP_PROTO(struct task_struct
*p
),
325 * Tracepoint for waiting on task to unschedule:
327 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
328 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
329 TP_PROTO(struct task_struct
*p
),
331 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
332 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
333 TP_PROTO(struct rq
*rq
, struct task_struct
*p
),
335 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
338 * Tracepoint for a waiting task:
340 LTTNG_TRACEPOINT_EVENT(sched_process_wait
,
342 TP_PROTO(struct pid
*pid
),
347 __array_text( char, comm
, TASK_COMM_LEN
)
348 __field( pid_t
, tid
)
353 tp_memcpy(comm
, current
->comm
, TASK_COMM_LEN
)
354 tp_assign(tid
, pid_nr(pid
))
355 tp_assign(prio
, current
->prio
- MAX_RT_PRIO
)
358 TP_printk("comm=%s tid=%d prio=%d",
359 __entry
->comm
, __entry
->tid
, __entry
->prio
)
363 * Tracepoint for do_fork.
364 * Saving both TID and PID information, especially for the child, allows
365 * trace analyzers to distinguish between creation of a new process and
366 * creation of a new thread. Newly created processes will have child_tid
367 * == child_pid, while creation of a thread yields to child_tid !=
370 LTTNG_TRACEPOINT_EVENT(sched_process_fork
,
372 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
374 TP_ARGS(parent
, child
),
377 __array_text( char, parent_comm
, TASK_COMM_LEN
)
378 __field( pid_t
, parent_tid
)
379 __field( pid_t
, parent_pid
)
380 __array_text( char, child_comm
, TASK_COMM_LEN
)
381 __field( pid_t
, child_tid
)
382 __field( pid_t
, child_pid
)
386 tp_memcpy(parent_comm
, parent
->comm
, TASK_COMM_LEN
)
387 tp_assign(parent_tid
, parent
->pid
)
388 tp_assign(parent_pid
, parent
->tgid
)
389 tp_memcpy(child_comm
, child
->comm
, TASK_COMM_LEN
)
390 tp_assign(child_tid
, child
->pid
)
391 tp_assign(child_pid
, child
->tgid
)
394 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
395 __entry
->parent_comm
, __entry
->parent_tid
,
396 __entry
->child_comm
, __entry
->child_tid
)
399 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
401 * Tracepoint for sending a signal:
403 LTTNG_TRACEPOINT_EVENT(sched_signal_send
,
405 TP_PROTO(int sig
, struct task_struct
*p
),
411 __array( char, comm
, TASK_COMM_LEN
)
412 __field( pid_t
, pid
)
416 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
417 tp_assign(pid
, p
->pid
)
421 TP_printk("sig=%d comm=%s pid=%d",
422 __entry
->sig
, __entry
->comm
, __entry
->pid
)
426 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
428 * Tracepoint for exec:
430 LTTNG_TRACEPOINT_EVENT(sched_process_exec
,
432 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
433 struct linux_binprm
*bprm
),
435 TP_ARGS(p
, old_pid
, bprm
),
438 __string( filename
, bprm
->filename
)
439 __field( pid_t
, tid
)
440 __field( pid_t
, old_tid
)
444 tp_strcpy(filename
, bprm
->filename
)
445 tp_assign(tid
, p
->pid
)
446 tp_assign(old_tid
, old_pid
)
449 TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename
),
450 __entry
->tid
, __entry
->old_tid
)
454 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
456 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
457 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
459 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template
,
461 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
466 __array_text( char, comm
, TASK_COMM_LEN
)
467 __field( pid_t
, tid
)
468 __field( u64
, delay
)
472 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
473 tp_assign(tid
, tsk
->pid
)
474 tp_assign(delay
, delay
)
480 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
481 __entry
->comm
, __entry
->tid
,
482 (unsigned long long)__entry
->delay
)
487 * Tracepoint for accounting wait time (time the task is runnable
488 * but not actually running due to scheduler contention).
490 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_wait
,
491 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
495 * Tracepoint for accounting sleep time (time the task is not runnable,
496 * including iowait, see below).
498 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_sleep
,
499 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
503 * Tracepoint for accounting iowait time (time the task is not runnable
504 * due to waiting on IO to complete).
506 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_iowait
,
507 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
510 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
512 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
514 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_blocked
,
515 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
520 * Tracepoint for accounting runtime (time the task is executing
523 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime
,
525 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
527 TP_ARGS(tsk
, runtime
, vruntime
),
530 __array_text( char, comm
, TASK_COMM_LEN
)
531 __field( pid_t
, tid
)
532 __field( u64
, runtime
)
533 __field( u64
, vruntime
)
537 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
538 tp_assign(tid
, tsk
->pid
)
539 tp_assign(runtime
, runtime
)
540 tp_assign(vruntime
, vruntime
)
543 __perf_count(runtime
)
544 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
549 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
550 __entry
->comm
, __entry
->tid
,
551 (unsigned long long)__entry
->runtime
,
552 (unsigned long long)__entry
->vruntime
)
556 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
558 * Tracepoint for showing priority inheritance modifying a tasks
561 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
563 TP_PROTO(struct task_struct
*tsk
, int newprio
),
565 TP_ARGS(tsk
, newprio
),
568 __array_text( char, comm
, TASK_COMM_LEN
)
569 __field( pid_t
, tid
)
570 __field( int, oldprio
)
571 __field( int, newprio
)
575 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
576 tp_assign(tid
, tsk
->pid
)
577 tp_assign(oldprio
, tsk
->prio
- MAX_RT_PRIO
)
578 tp_assign(newprio
, newprio
- MAX_RT_PRIO
)
581 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
582 __entry
->comm
, __entry
->tid
,
583 __entry
->oldprio
, __entry
->newprio
)
587 #endif /* LTTNG_TRACE_SCHED_H */
589 /* This part must be outside protection */
590 #include "../../../probes/define_trace.h"