#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
#define lttng_proc_inum ns.inum
#else
#define lttng_proc_inum proc_inum
#endif

#define LTTNG_MAX_PID_NS_LEVEL 32
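
/*
 * Note: LTTNG_MAX_PID_NS_LEVEL bounds the vtids[] array recorded by the
 * sched_process_fork event below; only the first 32 levels of nested PID
 * namespaces are captured for a newly created task.
 */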
#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always RUNNING
	 * (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */
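
/*
 * The helpers above feed the prev_state field of sched_switch: on 3.2+
 * kernels a task that was preempted while runnable is reported as
 * TASK_RUNNING with the extra TASK_STATE_MAX bit set, so trace viewers can
 * tell an involuntary switch from a voluntary one; on 2.6.35-era kernels
 * the preempted case is simply forced to TASK_RUNNING.
 */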
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)
/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)
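
/*
 * Usage sketch (assuming a standard lttng-tools install; the event names
 * are the ones defined in this file, the session name is arbitrary):
 *
 *   lttng create sched-session
 *   lttng enable-event --kernel sched_switch,sched_wakeup
 *   lttng start
 *   ... run the workload ...
 *   lttng stop && lttng view
 */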
/*
 * Tracepoint for waking up a task:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		ctf_integer(int, target_cpu, task_cpu(p))
#endif
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
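
/*
 * The prio fields throughout this file are rebased with "- MAX_RT_PRIO"
 * so that the recorded value matches the range user space sees: a normal
 * task at nice 0 (kernel prio 120, MAX_RT_PRIO 100) is recorded as 20,
 * while real-time tasks come out negative.
 */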
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
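
/*
 * A viewer can derive wakeup latency on 4.3+ kernels by pairing a
 * sched_waking event (emitted in the waking context) with the later
 * sched_switch that schedules the same tid in; this is a common analysis
 * done offline, not something the probes compute themselves.
 */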
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, prev->state)
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
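
/*
 * Illustrative (not verbatim) babeltrace output for the event above, with
 * hypothetical values; the field names match the TP_FIELDS definition:
 *
 *   sched_switch: { prev_comm = "bash", prev_tid = 1234, prev_prio = 20,
 *     prev_state = 1, next_comm = "swapper/0", next_tid = 0, next_prio = 20 }
 */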
/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
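
/*
 * Note that orig_cpu is sampled with task_cpu(p) when the probe runs,
 * while dest_cpu comes from the tracepoint argument; this reports the
 * source CPU assuming the upstream call site fires the tracepoint before
 * updating the task's CPU.
 */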
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)
/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)
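
/*
 * Here comm and prio describe the waiting task (current), while tid is
 * the waited-on struct pid flattened to a global pid_t with pid_nr().
 */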
/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields child_tid !=
 * child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
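
/*
 * Worked example of the TID/PID distinction described above (values are
 * hypothetical): fork() from pid 1000 yields child_tid == child_pid
 * (e.g. both 1001), whereas pthread_create() in process 1000 yields
 * child_tid 1001 with child_pid still 1000. The vtids sequence holds the
 * child's TID as seen from each nested PID namespace, outermost first.
 */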
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif
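
/*
 * Reading aid (based on the upstream sched_process_exec semantics, not
 * enforced by this header): old_tid is the TID the task had before exec;
 * it can differ from tid when a non-leader thread calls exec and takes
 * over the group leader's PID.
 */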
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)
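
/*
 * The delay recorded by this class is a raw u64 taken straight from the
 * scheduler's accounting; on mainline kernels these sched_stat values are
 * nanoseconds, and the tracepoints only fire when schedstats are enabled
 * (both points reflect upstream behaviour rather than anything this
 * header enforces).
 */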
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#endif
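
/*
 * runtime is the time actually spent executing, while vruntime is the
 * CFS virtual runtime (runtime scaled by task weight), which is why the
 * two u64 fields can diverge for non-default nice levels.
 */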
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif
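
/*
 * On 4.12+ the boosted priority is derived from the top pi_task waiter,
 * falling back to the task's own priority when pi_task is NULL, as the
 * ternary above shows; older kernels passed the new priority in directly.
 */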
#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <probes/define_trace.h>