2 #define TRACE_SYSTEM sched
4 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_SCHED_H
7 #include <probes/lttng-tracepoint-event.h>
8 #include <linux/sched.h>
9 #include <linux/pid_namespace.h>
10 #include <linux/binfmts.h>
11 #include <linux/version.h>
12 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
13 #include <linux/sched/rt.h>
16 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
17 #define lttng_proc_inum ns.inum
19 #define lttng_proc_inum proc_inum
22 #define LTTNG_MAX_PID_NS_LEVEL 32
24 #ifndef _TRACE_SCHED_DEF_
25 #define _TRACE_SCHED_DEF_
27 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
29 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
33 #ifdef CONFIG_SCHED_DEBUG
35 #endif /* CONFIG_SCHED_DEBUG */
38 * Preemption ignores task state, therefore preempted tasks are always
39 * RUNNING (we will not have dequeued if state != RUNNING).
42 return TASK_REPORT_MAX
;
45 * task_state_index() uses fls() and returns a value from 0-8 range.
46 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
47 * it for left shift operation to get the correct task->state
50 state
= task_state_index(p
);
52 return state
? (1 << (state
- 1)) : state
;
55 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
57 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
61 #ifdef CONFIG_SCHED_DEBUG
63 #endif /* CONFIG_SCHED_DEBUG */
66 * Preemption ignores task state, therefore preempted tasks are always
67 * RUNNING (we will not have dequeued if state != RUNNING).
70 return TASK_REPORT_MAX
;
73 * __get_task_state() uses fls() and returns a value from 0-8 range.
74 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
75 * it for left shift operation to get the correct task->state
78 state
= __get_task_state(p
);
80 return state
? (1 << (state
- 1)) : state
;
83 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
85 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
87 #ifdef CONFIG_SCHED_DEBUG
89 #endif /* CONFIG_SCHED_DEBUG */
91 * Preemption ignores task state, therefore preempted tasks are always RUNNING
92 * (we will not have dequeued if state != RUNNING).
94 return preempt
? TASK_RUNNING
| TASK_STATE_MAX
: p
->state
;
97 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
99 static inline long __trace_sched_switch_state(struct task_struct
*p
)
101 long state
= p
->state
;
103 #ifdef CONFIG_PREEMPT
104 #ifdef CONFIG_SCHED_DEBUG
105 BUG_ON(p
!= current
);
106 #endif /* CONFIG_SCHED_DEBUG */
108 * For all intents and purposes a preempted task is a running task.
110 if (preempt_count() & PREEMPT_ACTIVE
)
111 state
= TASK_RUNNING
| TASK_STATE_MAX
;
112 #endif /* CONFIG_PREEMPT */
117 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
119 static inline long __trace_sched_switch_state(struct task_struct
*p
)
121 long state
= p
->state
;
123 #ifdef CONFIG_PREEMPT
125 * For all intents and purposes a preempted task is a running task.
127 if (task_preempt_count(p
) & PREEMPT_ACTIVE
)
128 state
= TASK_RUNNING
| TASK_STATE_MAX
;
134 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
136 static inline long __trace_sched_switch_state(struct task_struct
*p
)
138 long state
= p
->state
;
140 #ifdef CONFIG_PREEMPT
142 * For all intents and purposes a preempted task is a running task.
144 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
145 state
= TASK_RUNNING
| TASK_STATE_MAX
;
151 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
153 static inline long __trace_sched_switch_state(struct task_struct
*p
)
155 long state
= p
->state
;
157 #ifdef CONFIG_PREEMPT
159 * For all intents and purposes a preempted task is a running task.
161 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
162 state
= TASK_RUNNING
;
170 #endif /* _TRACE_SCHED_DEF_ */
173 * Tracepoint for calling kthread_stop, performed to end a kthread:
175 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop
,
177 TP_PROTO(struct task_struct
*t
),
182 ctf_array_text(char, comm
, t
->comm
, TASK_COMM_LEN
)
183 ctf_integer(pid_t
, tid
, t
->pid
)
188 * Tracepoint for the return value of the kthread stopping:
190 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret
,
197 ctf_integer(int, ret
, ret
)
202 * Tracepoint for waking up a task:
204 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
205 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
206 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
207 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
208 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
209 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
210 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
211 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
212 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
214 TP_PROTO(struct task_struct
*p
),
219 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
220 ctf_integer(pid_t
, tid
, p
->pid
)
221 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
222 ctf_integer(int, target_cpu
, task_cpu(p
))
225 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
226 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
228 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
229 TP_PROTO(struct task_struct
*p
, int success
),
233 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
235 TP_ARGS(rq
, p
, success
),
239 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
240 ctf_integer(pid_t
, tid
, p
->pid
)
241 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
242 ctf_integer(int, success
, success
)
243 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
244 ctf_integer(int, target_cpu
, task_cpu(p
))
248 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
250 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
251 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
252 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
253 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
254 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
255 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
256 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
257 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
260 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
261 * called from the waking context.
263 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_waking
,
264 TP_PROTO(struct task_struct
*p
),
268 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
269 * It it not always called from the waking context.
271 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
272 TP_PROTO(struct task_struct
*p
),
276 * Tracepoint for waking up a new task:
278 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
279 TP_PROTO(struct task_struct
*p
),
282 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
284 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
285 TP_PROTO(struct task_struct
*p
, int success
),
289 * Tracepoint for waking up a new task:
291 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
292 TP_PROTO(struct task_struct
*p
, int success
),
295 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
297 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
298 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
299 TP_ARGS(rq
, p
, success
))
302 * Tracepoint for waking up a new task:
304 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
305 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
306 TP_ARGS(rq
, p
, success
))
308 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
311 * Tracepoint for task switches, performed by the scheduler:
313 LTTNG_TRACEPOINT_EVENT(sched_switch
,
315 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
316 TP_PROTO(bool preempt
,
317 struct task_struct
*prev
,
318 struct task_struct
*next
),
320 TP_ARGS(preempt
, prev
, next
),
321 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
322 TP_PROTO(struct task_struct
*prev
,
323 struct task_struct
*next
),
326 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
327 TP_PROTO(struct rq
*rq
, struct task_struct
*prev
,
328 struct task_struct
*next
),
330 TP_ARGS(rq
, prev
, next
),
331 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
334 ctf_array_text(char, prev_comm
, prev
->comm
, TASK_COMM_LEN
)
335 ctf_integer(pid_t
, prev_tid
, prev
->pid
)
336 ctf_integer(int, prev_prio
, prev
->prio
- MAX_RT_PRIO
)
337 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
338 ctf_integer(long, prev_state
, __trace_sched_switch_state(preempt
, prev
))
339 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
340 ctf_integer(long, prev_state
, __trace_sched_switch_state(prev
))
342 ctf_integer(long, prev_state
, prev
->state
)
344 ctf_array_text(char, next_comm
, next
->comm
, TASK_COMM_LEN
)
345 ctf_integer(pid_t
, next_tid
, next
->pid
)
346 ctf_integer(int, next_prio
, next
->prio
- MAX_RT_PRIO
)
351 * Tracepoint for a task being migrated:
353 LTTNG_TRACEPOINT_EVENT(sched_migrate_task
,
355 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
357 TP_ARGS(p
, dest_cpu
),
360 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
361 ctf_integer(pid_t
, tid
, p
->pid
)
362 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
363 ctf_integer(int, orig_cpu
, task_cpu(p
))
364 ctf_integer(int, dest_cpu
, dest_cpu
)
368 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template
,
370 TP_PROTO(struct task_struct
*p
),
375 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
376 ctf_integer(pid_t
, tid
, p
->pid
)
377 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
382 * Tracepoint for freeing a task:
384 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_free
,
385 TP_PROTO(struct task_struct
*p
),
390 * Tracepoint for a task exiting:
392 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_exit
,
393 TP_PROTO(struct task_struct
*p
),
397 * Tracepoint for waiting on task to unschedule:
399 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
400 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
401 TP_PROTO(struct task_struct
*p
),
403 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
404 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
405 TP_PROTO(struct rq
*rq
, struct task_struct
*p
),
407 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
410 * Tracepoint for a waiting task:
412 LTTNG_TRACEPOINT_EVENT(sched_process_wait
,
414 TP_PROTO(struct pid
*pid
),
419 ctf_array_text(char, comm
, current
->comm
, TASK_COMM_LEN
)
420 ctf_integer(pid_t
, tid
, pid_nr(pid
))
421 ctf_integer(int, prio
, current
->prio
- MAX_RT_PRIO
)
426 * Tracepoint for do_fork.
427 * Saving both TID and PID information, especially for the child, allows
428 * trace analyzers to distinguish between creation of a new process and
429 * creation of a new thread. Newly created processes will have child_tid
430 * == child_pid, while creation of a thread yields to child_tid !=
433 LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork
,
435 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
437 TP_ARGS(parent
, child
),
440 pid_t vtids
[LTTNG_MAX_PID_NS_LEVEL
];
441 unsigned int ns_level
;
446 struct pid
*child_pid
;
449 child_pid
= task_pid(child
);
450 tp_locvar
->ns_level
=
451 min_t(unsigned int, child_pid
->level
+ 1,
452 LTTNG_MAX_PID_NS_LEVEL
);
453 for (i
= 0; i
< tp_locvar
->ns_level
; i
++)
454 tp_locvar
->vtids
[i
] = child_pid
->numbers
[i
].nr
;
459 ctf_array_text(char, parent_comm
, parent
->comm
, TASK_COMM_LEN
)
460 ctf_integer(pid_t
, parent_tid
, parent
->pid
)
461 ctf_integer(pid_t
, parent_pid
, parent
->tgid
)
462 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
463 ctf_integer(unsigned int, parent_ns_inum
,
465 unsigned int parent_ns_inum
= 0;
468 struct pid_namespace
*pid_ns
;
470 pid_ns
= task_active_pid_ns(parent
);
473 pid_ns
->lttng_proc_inum
;
478 ctf_array_text(char, child_comm
, child
->comm
, TASK_COMM_LEN
)
479 ctf_integer(pid_t
, child_tid
, child
->pid
)
480 ctf_sequence(pid_t
, vtids
, tp_locvar
->vtids
, u8
, tp_locvar
->ns_level
)
481 ctf_integer(pid_t
, child_pid
, child
->tgid
)
482 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
483 ctf_integer(unsigned int, child_ns_inum
,
485 unsigned int child_ns_inum
= 0;
488 struct pid_namespace
*pid_ns
;
490 pid_ns
= task_active_pid_ns(child
);
493 pid_ns
->lttng_proc_inum
;
503 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
505 * Tracepoint for sending a signal:
507 LTTNG_TRACEPOINT_EVENT(sched_signal_send
,
509 TP_PROTO(int sig
, struct task_struct
*p
),
514 ctf_integer(int, sig
, sig
)
515 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
516 ctf_integer(pid_t
, tid
, p
->pid
)
521 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
523 * Tracepoint for exec:
525 LTTNG_TRACEPOINT_EVENT(sched_process_exec
,
527 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
528 struct linux_binprm
*bprm
),
530 TP_ARGS(p
, old_pid
, bprm
),
533 ctf_string(filename
, bprm
->filename
)
534 ctf_integer(pid_t
, tid
, p
->pid
)
535 ctf_integer(pid_t
, old_tid
, old_pid
)
540 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
542 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
543 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
545 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template
,
547 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
552 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
553 ctf_integer(pid_t
, tid
, tsk
->pid
)
554 ctf_integer(u64
, delay
, delay
)
560 * Tracepoint for accounting wait time (time the task is runnable
561 * but not actually running due to scheduler contention).
563 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_wait
,
564 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
568 * Tracepoint for accounting sleep time (time the task is not runnable,
569 * including iowait, see below).
571 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_sleep
,
572 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
576 * Tracepoint for accounting iowait time (time the task is not runnable
577 * due to waiting on IO to complete).
579 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_iowait
,
580 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
583 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
585 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
587 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_blocked
,
588 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
593 * Tracepoint for accounting runtime (time the task is executing
596 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime
,
598 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
600 TP_ARGS(tsk
, runtime
, vruntime
),
603 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
604 ctf_integer(pid_t
, tid
, tsk
->pid
)
605 ctf_integer(u64
, runtime
, runtime
)
606 ctf_integer(u64
, vruntime
, vruntime
)
611 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
612 LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
613 LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
615 * Tracepoint for showing priority inheritance modifying a tasks
618 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
620 TP_PROTO(struct task_struct
*tsk
, struct task_struct
*pi_task
),
622 TP_ARGS(tsk
, pi_task
),
625 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
626 ctf_integer(pid_t
, tid
, tsk
->pid
)
627 ctf_integer(int, oldprio
, tsk
->prio
- MAX_RT_PRIO
)
628 ctf_integer(int, newprio
, pi_task
? pi_task
->prio
- MAX_RT_PRIO
: tsk
->prio
- MAX_RT_PRIO
)
631 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
633 * Tracepoint for showing priority inheritance modifying a tasks
636 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
638 TP_PROTO(struct task_struct
*tsk
, int newprio
),
640 TP_ARGS(tsk
, newprio
),
643 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
644 ctf_integer(pid_t
, tid
, tsk
->pid
)
645 ctf_integer(int, oldprio
, tsk
->prio
- MAX_RT_PRIO
)
646 ctf_integer(int, newprio
, newprio
- MAX_RT_PRIO
)
651 #endif /* LTTNG_TRACE_SCHED_H */
653 /* This part must be outside protection */
654 #include <probes/define_trace.h>
/* (stray gitweb footer, not part of the source:) This page took 0.046254 seconds and 4 git commands to generate. */