/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif
#include <wrapper/namespace.h>

#define LTTNG_MAX_PID_NS_LEVEL 32
#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
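/*
 * Worked example of the mapping above: a task sleeping in
 * TASK_UNINTERRUPTIBLE (0x2) makes task_state_index() return fls(0x2) == 2,
 * and (1 << (2 - 1)) == 0x2 restores the original state bit, so the traced
 * prev_state value matches the task_state enum declared below.
 */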
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __get_task_state() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = __get_task_state(p);

	return state ? (1 << (state - 1)) : state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always RUNNING
	 * (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
#else

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif /* CONFIG_PREEMPT */

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */
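/*
 * Note: from 3.2 onward the helpers above mark a preempted "prev" task with
 * TASK_STATE_MAX (TASK_REPORT_MAX on >= 4.15) instead of its saved state,
 * so trace analyzers can tell an involuntary context switch apart from a
 * task that blocked voluntarily; older kernels simply report TASK_RUNNING.
 */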
/*
 * Enumeration of the task state bitmask.
 * Only bit flags are enumerated here, not composition of states.
 */
LTTNG_TRACEPOINT_ENUM(task_state,
	TP_ENUM_VALUES(
		ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
		ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
		ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
		ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
		ctf_enum_value("TASK_TRACED", __TASK_TRACED)
		ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
		ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
		ctf_enum_value("TASK_PARKED", TASK_PARKED)
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */

		ctf_enum_value("TASK_DEAD", TASK_DEAD)
		ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
		ctf_enum_value("TASK_WAKING", TASK_WAKING)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
		ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
		ctf_enum_value("TASK_NEW", TASK_NEW)
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) */

		ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
	)
)
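/*
 * Note: these labels are emitted into the CTF trace metadata, so trace
 * viewers can render the sched_switch prev_state field symbolically instead
 * of as a raw bitmask.
 */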
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)
/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)
232 * Tracepoint for waking up a task:
234 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
235 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
236 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
237 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
238 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
239 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
240 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
241 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
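/*
 * Note: the LTTNG_RT_KERNEL_RANGE() checks above cover stable PREEMPT_RT
 * series that backported the newer wakeup instrumentation (the sched_waking
 * tracepoint and the dropped "success" argument), so those -rt kernels take
 * the same branch as mainline >= 4.3.
 */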
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
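/*
 * Worked example of the "prio - MAX_RT_PRIO" normalization used throughout
 * this file: with MAX_RT_PRIO == 100, a normal task at nice 0 has
 * p->prio == 120 and is traced as prio 20, while real-time tasks
 * (p->prio < 100) show up as negative values.
 */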
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#else
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
#else
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)
/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)
/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields child_tid !=
 * child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_ns_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_ns_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
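/*
 * Note: vtids records the child's TID as seen from each PID namespace it
 * belongs to, from the root namespace down to its own, truncated to
 * LTTNG_MAX_PID_NS_LEVEL entries; parent_ns_inum and child_ns_inum record
 * the inode number identifying each task's active PID namespace.
 */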
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     tasks; adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) */
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
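/*
 * Note: on these kernels the tracepoint receives the top priority-inheritance
 * donor task rather than a raw priority; a NULL pi_task means the boost is
 * being released, so newprio falls back to the task's own priority.
 */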
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */
#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <probes/define_trace.h>