2 #define TRACE_SYSTEM sched
4 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_SCHED_H
#include "../../../probes/lttng-tracepoint-event.h"
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
#define lttng_proc_inum ns.inum
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)) */
#define lttng_proc_inum proc_inum
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)) */
22 #define LTTNG_MAX_PID_NS_LEVEL 32
24 #ifndef _TRACE_SCHED_DEF_
25 #define _TRACE_SCHED_DEF_
27 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
29 static inline long __trace_sched_switch_state(struct task_struct
*p
)
31 long state
= p
->state
;
34 #ifdef CONFIG_SCHED_DEBUG
36 #endif /* CONFIG_SCHED_DEBUG */
38 * For all intents and purposes a preempted task is a running task.
40 if (preempt_count() & PREEMPT_ACTIVE
)
41 state
= TASK_RUNNING
| TASK_STATE_MAX
;
42 #endif /* CONFIG_PREEMPT */
47 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
49 static inline long __trace_sched_switch_state(struct task_struct
*p
)
51 long state
= p
->state
;
55 * For all intents and purposes a preempted task is a running task.
57 if (task_preempt_count(p
) & PREEMPT_ACTIVE
)
58 state
= TASK_RUNNING
| TASK_STATE_MAX
;
64 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
66 static inline long __trace_sched_switch_state(struct task_struct
*p
)
68 long state
= p
->state
;
72 * For all intents and purposes a preempted task is a running task.
74 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
75 state
= TASK_RUNNING
| TASK_STATE_MAX
;
81 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
83 static inline long __trace_sched_switch_state(struct task_struct
*p
)
85 long state
= p
->state
;
89 * For all intents and purposes a preempted task is a running task.
91 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
100 #endif /* _TRACE_SCHED_DEF_ */
103 * Tracepoint for calling kthread_stop, performed to end a kthread:
105 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop
,
107 TP_PROTO(struct task_struct
*t
),
112 ctf_array_text(char, comm
, t
->comm
, TASK_COMM_LEN
)
113 ctf_integer(pid_t
, tid
, t
->pid
)
118 * Tracepoint for the return value of the kthread stopping:
120 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret
,
127 ctf_integer(int, ret
, ret
)
132 * Tracepoint for waking up a task:
134 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
136 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
137 TP_PROTO(struct task_struct
*p
, int success
),
141 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
143 TP_ARGS(rq
, p
, success
),
147 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
148 ctf_integer(pid_t
, tid
, p
->pid
)
149 ctf_integer(int, prio
, p
->prio
)
150 ctf_integer(int, success
, success
)
151 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
152 ctf_integer(int, target_cpu
, task_cpu(p
))
157 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
159 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
160 TP_PROTO(struct task_struct
*p
, int success
),
164 * Tracepoint for waking up a new task:
166 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
167 TP_PROTO(struct task_struct
*p
, int success
),
170 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
172 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
173 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
174 TP_ARGS(rq
, p
, success
))
177 * Tracepoint for waking up a new task:
179 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
180 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
181 TP_ARGS(rq
, p
, success
))
183 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
186 * Tracepoint for task switches, performed by the scheduler:
188 LTTNG_TRACEPOINT_EVENT(sched_switch
,
190 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
191 TP_PROTO(struct task_struct
*prev
,
192 struct task_struct
*next
),
195 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
196 TP_PROTO(struct rq
*rq
, struct task_struct
*prev
,
197 struct task_struct
*next
),
199 TP_ARGS(rq
, prev
, next
),
200 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
203 ctf_array_text(char, prev_comm
, prev
->comm
, TASK_COMM_LEN
)
204 ctf_integer(pid_t
, prev_tid
, prev
->pid
)
205 ctf_integer(int, prev_prio
, prev
->prio
- MAX_RT_PRIO
)
206 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
207 ctf_integer(long, prev_state
, __trace_sched_switch_state(prev
))
209 ctf_integer(long, prev_state
, prev
->state
)
211 ctf_array_text(char, next_comm
, next
->comm
, TASK_COMM_LEN
)
212 ctf_integer(pid_t
, next_tid
, next
->pid
)
213 ctf_integer(int, next_prio
, next
->prio
- MAX_RT_PRIO
)
218 * Tracepoint for a task being migrated:
220 LTTNG_TRACEPOINT_EVENT(sched_migrate_task
,
222 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
224 TP_ARGS(p
, dest_cpu
),
227 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
228 ctf_integer(pid_t
, tid
, p
->pid
)
229 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
230 ctf_integer(int, orig_cpu
, task_cpu(p
))
231 ctf_integer(int, dest_cpu
, dest_cpu
)
235 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template
,
237 TP_PROTO(struct task_struct
*p
),
242 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
243 ctf_integer(pid_t
, tid
, p
->pid
)
244 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
249 * Tracepoint for freeing a task:
251 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_free
,
252 TP_PROTO(struct task_struct
*p
),
257 * Tracepoint for a task exiting:
259 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_exit
,
260 TP_PROTO(struct task_struct
*p
),
264 * Tracepoint for waiting on task to unschedule:
266 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
267 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
268 TP_PROTO(struct task_struct
*p
),
270 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
271 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
272 TP_PROTO(struct rq
*rq
, struct task_struct
*p
),
274 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
277 * Tracepoint for a waiting task:
279 LTTNG_TRACEPOINT_EVENT(sched_process_wait
,
281 TP_PROTO(struct pid
*pid
),
286 ctf_array_text(char, comm
, current
->comm
, TASK_COMM_LEN
)
287 ctf_integer(pid_t
, tid
, pid_nr(pid
))
288 ctf_integer(int, prio
, current
->prio
- MAX_RT_PRIO
)
293 * Tracepoint for do_fork.
294 * Saving both TID and PID information, especially for the child, allows
295 * trace analyzers to distinguish between creation of a new process and
296 * creation of a new thread. Newly created processes will have child_tid
297 * == child_pid, while creation of a thread yields to child_tid !=
300 LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork
,
302 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
304 TP_ARGS(parent
, child
),
307 pid_t vtids
[LTTNG_MAX_PID_NS_LEVEL
];
308 unsigned int ns_level
;
313 struct pid
*child_pid
;
316 child_pid
= task_pid(child
);
317 tp_locvar
->ns_level
=
318 min_t(unsigned int, child_pid
->level
+ 1,
319 LTTNG_MAX_PID_NS_LEVEL
);
320 for (i
= 0; i
< tp_locvar
->ns_level
; i
++)
321 tp_locvar
->vtids
[i
] = child_pid
->numbers
[i
].nr
;
326 ctf_array_text(char, parent_comm
, parent
->comm
, TASK_COMM_LEN
)
327 ctf_integer(pid_t
, parent_tid
, parent
->pid
)
328 ctf_integer(pid_t
, parent_pid
, parent
->tgid
)
329 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
330 ctf_integer(unsigned int, parent_ns_inum
,
332 unsigned int parent_ns_inum
= 0;
335 struct pid_namespace
*pid_ns
;
337 pid_ns
= task_active_pid_ns(parent
);
340 pid_ns
->lttng_proc_inum
;
345 ctf_array_text(char, child_comm
, child
->comm
, TASK_COMM_LEN
)
346 ctf_integer(pid_t
, child_tid
, child
->pid
)
347 ctf_sequence(pid_t
, vtids
, tp_locvar
->vtids
, u8
, tp_locvar
->ns_level
)
348 ctf_integer(pid_t
, child_pid
, child
->tgid
)
349 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
350 ctf_integer(unsigned int, child_ns_inum
,
352 unsigned int child_ns_inum
= 0;
355 struct pid_namespace
*pid_ns
;
357 pid_ns
= task_active_pid_ns(child
);
360 pid_ns
->lttng_proc_inum
;
368 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
370 * Tracepoint for sending a signal:
372 LTTNG_TRACEPOINT_EVENT(sched_signal_send
,
374 TP_PROTO(int sig
, struct task_struct
*p
),
379 ctf_integer(int, sig
, sig
)
380 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
381 ctf_integer(pid_t
, tid
, p
->pid
)
386 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
388 * Tracepoint for exec:
390 LTTNG_TRACEPOINT_EVENT(sched_process_exec
,
392 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
393 struct linux_binprm
*bprm
),
395 TP_ARGS(p
, old_pid
, bprm
),
398 ctf_string(filename
, bprm
->filename
)
399 ctf_integer(pid_t
, tid
, p
->pid
)
400 ctf_integer(pid_t
, old_tid
, old_pid
)
405 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
407 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
408 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
410 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template
,
412 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
417 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
418 ctf_integer(pid_t
, tid
, tsk
->pid
)
419 ctf_integer(u64
, delay
, delay
)
425 * Tracepoint for accounting wait time (time the task is runnable
426 * but not actually running due to scheduler contention).
428 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_wait
,
429 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
433 * Tracepoint for accounting sleep time (time the task is not runnable,
434 * including iowait, see below).
436 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_sleep
,
437 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
441 * Tracepoint for accounting iowait time (time the task is not runnable
442 * due to waiting on IO to complete).
444 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_iowait
,
445 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
448 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
450 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
452 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_blocked
,
453 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
458 * Tracepoint for accounting runtime (time the task is executing
461 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime
,
463 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
465 TP_ARGS(tsk
, runtime
, vruntime
),
468 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
469 ctf_integer(pid_t
, tid
, tsk
->pid
)
470 ctf_integer(u64
, runtime
, runtime
)
471 ctf_integer(u64
, vruntime
, vruntime
)
476 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
478 * Tracepoint for showing priority inheritance modifying a tasks
481 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
483 TP_PROTO(struct task_struct
*tsk
, int newprio
),
485 TP_ARGS(tsk
, newprio
),
488 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
489 ctf_integer(pid_t
, tid
, tsk
->pid
)
490 ctf_integer(int, oldprio
, tsk
->prio
- MAX_RT_PRIO
)
491 ctf_integer(int, newprio
, newprio
- MAX_RT_PRIO
)
496 #endif /* LTTNG_TRACE_SCHED_H */
498 /* This part must be outside protection */
499 #include "../../../probes/define_trace.h"