1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM sched
5 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define LTTNG_TRACE_SCHED_H
8 #include <probes/lttng-tracepoint-event.h>
9 #include <linux/sched.h>
10 #include <linux/pid_namespace.h>
11 #include <linux/binfmts.h>
12 #include <lttng-kernel-version.h>
13 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
14 #include <linux/sched/rt.h>
16 #include <wrapper/namespace.h>
18 #define LTTNG_MAX_PID_NS_LEVEL 32
20 #ifndef _TRACE_SCHED_DEF_
21 #define _TRACE_SCHED_DEF_
23 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0) \
24 || LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0))
26 static inline long __trace_sched_switch_state(bool preempt
,
27 unsigned int prev_state
,
28 struct task_struct
*p
)
32 #ifdef CONFIG_SCHED_DEBUG
34 #endif /* CONFIG_SCHED_DEBUG */
37 * Preemption ignores task state, therefore preempted tasks are always
38 * RUNNING (we will not have dequeued if state != RUNNING).
41 return TASK_REPORT_MAX
;
44 * task_state_index() uses fls() and returns a value from 0-8 range.
45 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
46 * it for left shift operation to get the correct task->state
49 state
= __task_state_index(prev_state
, p
->exit_state
);
51 return state
? (1 << (state
- 1)) : state
;
54 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
56 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
60 #ifdef CONFIG_SCHED_DEBUG
62 #endif /* CONFIG_SCHED_DEBUG */
65 * Preemption ignores task state, therefore preempted tasks are always
66 * RUNNING (we will not have dequeued if state != RUNNING).
69 return TASK_REPORT_MAX
;
72 * task_state_index() uses fls() and returns a value from 0-8 range.
73 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
74 * it for left shift operation to get the correct task->state
77 state
= task_state_index(p
);
79 return state
? (1 << (state
- 1)) : state
;
82 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0))
84 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
88 #ifdef CONFIG_SCHED_DEBUG
90 #endif /* CONFIG_SCHED_DEBUG */
93 * Preemption ignores task state, therefore preempted tasks are always
94 * RUNNING (we will not have dequeued if state != RUNNING).
97 return TASK_REPORT_MAX
;
100 * __get_task_state() uses fls() and returns a value from 0-8 range.
101 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
102 * it for left shift operation to get the correct task->state
105 state
= __get_task_state(p
);
107 return state
? (1 << (state
- 1)) : state
;
110 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
112 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
114 #ifdef CONFIG_SCHED_DEBUG
115 BUG_ON(p
!= current
);
116 #endif /* CONFIG_SCHED_DEBUG */
118 * Preemption ignores task state, therefore preempted tasks are always RUNNING
119 * (we will not have dequeued if state != RUNNING).
121 return preempt
? TASK_RUNNING
| TASK_STATE_MAX
: p
->state
;
124 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0))
126 static inline long __trace_sched_switch_state(struct task_struct
*p
)
128 long state
= p
->state
;
130 #ifdef CONFIG_PREEMPT
131 #ifdef CONFIG_SCHED_DEBUG
132 BUG_ON(p
!= current
);
133 #endif /* CONFIG_SCHED_DEBUG */
135 * For all intents and purposes a preempted task is a running task.
137 if (preempt_count() & PREEMPT_ACTIVE
)
138 state
= TASK_RUNNING
| TASK_STATE_MAX
;
139 #endif /* CONFIG_PREEMPT */
144 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,13,0))
146 static inline long __trace_sched_switch_state(struct task_struct
*p
)
148 long state
= p
->state
;
150 #ifdef CONFIG_PREEMPT
152 * For all intents and purposes a preempted task is a running task.
154 if (task_preempt_count(p
) & PREEMPT_ACTIVE
)
155 state
= TASK_RUNNING
| TASK_STATE_MAX
;
161 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,2,0))
163 static inline long __trace_sched_switch_state(struct task_struct
*p
)
165 long state
= p
->state
;
167 #ifdef CONFIG_PREEMPT
169 * For all intents and purposes a preempted task is a running task.
171 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
172 state
= TASK_RUNNING
| TASK_STATE_MAX
;
180 static inline long __trace_sched_switch_state(struct task_struct
*p
)
182 long state
= p
->state
;
184 #ifdef CONFIG_PREEMPT
186 * For all intents and purposes a preempted task is a running task.
188 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
189 state
= TASK_RUNNING
;
197 #endif /* _TRACE_SCHED_DEF_ */
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
/*
 * Enumeration of the task state bitmask.
 * Only bit flags are enumerated here, not composition of states.
 */
LTTNG_TRACEPOINT_ENUM(task_state,
	TP_ENUM_VALUES(
		ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
		ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
		ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
		ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
		ctf_enum_value("TASK_TRACED", __TASK_TRACED)
		ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
		ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
		ctf_enum_value("TASK_PARKED", TASK_PARKED)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) */

		ctf_enum_value("TASK_DEAD", TASK_DEAD)
		ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
		ctf_enum_value("TASK_WAKING", TASK_WAKING)

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0))
		ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0))
		ctf_enum_value("TASK_NEW", TASK_NEW)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0)) */

		ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
	)
)
#endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
236 * Tracepoint for calling kthread_stop, performed to end a kthread:
238 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop
,
240 TP_PROTO(struct task_struct
*t
),
245 ctf_array_text(char, comm
, t
->comm
, TASK_COMM_LEN
)
246 ctf_integer(pid_t
, tid
, t
->pid
)
251 * Tracepoint for the return value of the kthread stopping:
253 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret
,
260 ctf_integer(int, ret
, ret
)
265 * Tracepoint for waking up a task:
267 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
268 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
269 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
270 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
271 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
272 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
273 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
274 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
275 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
277 TP_PROTO(struct task_struct
*p
),
282 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
283 ctf_integer(pid_t
, tid
, p
->pid
)
284 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
285 ctf_integer(int, target_cpu
, task_cpu(p
))
288 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
289 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
291 TP_PROTO(struct task_struct
*p
, int success
),
296 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
297 ctf_integer(pid_t
, tid
, p
->pid
)
298 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
299 ctf_integer(int, success
, success
)
300 ctf_integer(int, target_cpu
, task_cpu(p
))
303 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
305 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
306 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
307 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
308 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
309 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
310 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
311 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
312 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
315 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
316 * called from the waking context.
318 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_waking
,
319 TP_PROTO(struct task_struct
*p
),
323 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
324 * It it not always called from the waking context.
326 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
327 TP_PROTO(struct task_struct
*p
),
331 * Tracepoint for waking up a new task:
333 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
334 TP_PROTO(struct task_struct
*p
),
339 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
340 TP_PROTO(struct task_struct
*p
, int success
),
344 * Tracepoint for waking up a new task:
346 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
347 TP_PROTO(struct task_struct
*p
, int success
),
350 #endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
353 * Tracepoint for task switches, performed by the scheduler:
356 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0) \
357 || LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0))
358 LTTNG_TRACEPOINT_EVENT(sched_switch
,
360 TP_PROTO(bool preempt
,
361 struct task_struct
*prev
,
362 struct task_struct
*next
,
363 unsigned int prev_state
),
365 TP_ARGS(preempt
, prev
, next
, prev_state
),
368 ctf_array_text(char, prev_comm
, prev
->comm
, TASK_COMM_LEN
)
369 ctf_integer(pid_t
, prev_tid
, prev
->pid
)
370 ctf_integer(int, prev_prio
, prev
->prio
- MAX_RT_PRIO
)
371 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
372 ctf_enum(task_state
, long, prev_state
, __trace_sched_switch_state(preempt
, prev_state
, prev
))
374 ctf_integer(long, prev_state
, __trace_sched_switch_state(preempt
, prev_state
, prev
))
376 ctf_array_text(char, next_comm
, next
->comm
, TASK_COMM_LEN
)
377 ctf_integer(pid_t
, next_tid
, next
->pid
)
378 ctf_integer(int, next_prio
, next
->prio
- MAX_RT_PRIO
)
382 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
384 LTTNG_TRACEPOINT_EVENT(sched_switch
,
386 TP_PROTO(bool preempt
,
387 struct task_struct
*prev
,
388 struct task_struct
*next
),
390 TP_ARGS(preempt
, prev
, next
),
393 ctf_array_text(char, prev_comm
, prev
->comm
, TASK_COMM_LEN
)
394 ctf_integer(pid_t
, prev_tid
, prev
->pid
)
395 ctf_integer(int, prev_prio
, prev
->prio
- MAX_RT_PRIO
)
396 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
397 ctf_enum(task_state
, long, prev_state
, __trace_sched_switch_state(preempt
, prev
))
399 ctf_integer(long, prev_state
, __trace_sched_switch_state(preempt
, prev
))
401 ctf_array_text(char, next_comm
, next
->comm
, TASK_COMM_LEN
)
402 ctf_integer(pid_t
, next_tid
, next
->pid
)
403 ctf_integer(int, next_prio
, next
->prio
- MAX_RT_PRIO
)
409 LTTNG_TRACEPOINT_EVENT(sched_switch
,
411 TP_PROTO(struct task_struct
*prev
,
412 struct task_struct
*next
),
417 ctf_array_text(char, prev_comm
, prev
->comm
, TASK_COMM_LEN
)
418 ctf_integer(pid_t
, prev_tid
, prev
->pid
)
419 ctf_integer(int, prev_prio
, prev
->prio
- MAX_RT_PRIO
)
420 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
421 ctf_enum(task_state
, long, prev_state
, __trace_sched_switch_state(prev
))
423 ctf_integer(long, prev_state
, __trace_sched_switch_state(prev
))
425 ctf_array_text(char, next_comm
, next
->comm
, TASK_COMM_LEN
)
426 ctf_integer(pid_t
, next_tid
, next
->pid
)
427 ctf_integer(int, next_prio
, next
->prio
- MAX_RT_PRIO
)
433 * Tracepoint for a task being migrated:
435 LTTNG_TRACEPOINT_EVENT(sched_migrate_task
,
437 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
439 TP_ARGS(p
, dest_cpu
),
442 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
443 ctf_integer(pid_t
, tid
, p
->pid
)
444 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
445 ctf_integer(int, orig_cpu
, task_cpu(p
))
446 ctf_integer(int, dest_cpu
, dest_cpu
)
450 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template
,
452 TP_PROTO(struct task_struct
*p
),
457 ctf_array_text(char, comm
, p
->comm
, TASK_COMM_LEN
)
458 ctf_integer(pid_t
, tid
, p
->pid
)
459 ctf_integer(int, prio
, p
->prio
- MAX_RT_PRIO
)
464 * Tracepoint for freeing a task:
466 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_free
,
467 TP_PROTO(struct task_struct
*p
),
472 * Tracepoint for a task exiting:
474 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_exit
,
475 TP_PROTO(struct task_struct
*p
),
479 * Tracepoint for waiting on task to unschedule:
481 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
482 TP_PROTO(struct task_struct
*p
),
486 * Tracepoint for a waiting task:
488 LTTNG_TRACEPOINT_EVENT(sched_process_wait
,
490 TP_PROTO(struct pid
*pid
),
495 ctf_array_text(char, comm
, current
->comm
, TASK_COMM_LEN
)
496 ctf_integer(pid_t
, tid
, pid_nr(pid
))
497 ctf_integer(int, prio
, current
->prio
- MAX_RT_PRIO
)
502 * Tracepoint for do_fork.
503 * Saving both TID and PID information, especially for the child, allows
504 * trace analyzers to distinguish between creation of a new process and
505 * creation of a new thread. Newly created processes will have child_tid
506 * == child_pid, while creation of a thread yields to child_tid !=
509 LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork
,
511 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
513 TP_ARGS(parent
, child
),
516 pid_t vtids
[LTTNG_MAX_PID_NS_LEVEL
];
517 unsigned int ns_level
;
522 struct pid
*child_pid
;
525 child_pid
= task_pid(child
);
526 tp_locvar
->ns_level
=
527 min_t(unsigned int, child_pid
->level
+ 1,
528 LTTNG_MAX_PID_NS_LEVEL
);
529 for (i
= 0; i
< tp_locvar
->ns_level
; i
++)
530 tp_locvar
->vtids
[i
] = child_pid
->numbers
[i
].nr
;
535 ctf_array_text(char, parent_comm
, parent
->comm
, TASK_COMM_LEN
)
536 ctf_integer(pid_t
, parent_tid
, parent
->pid
)
537 ctf_integer(pid_t
, parent_pid
, parent
->tgid
)
538 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
539 ctf_integer(unsigned int, parent_ns_inum
,
541 unsigned int parent_ns_inum
= 0;
544 struct pid_namespace
*pid_ns
;
546 pid_ns
= task_active_pid_ns(parent
);
549 pid_ns
->lttng_ns_inum
;
554 ctf_array_text(char, child_comm
, child
->comm
, TASK_COMM_LEN
)
555 ctf_integer(pid_t
, child_tid
, child
->pid
)
556 ctf_sequence(pid_t
, vtids
, tp_locvar
->vtids
, u8
, tp_locvar
->ns_level
)
557 ctf_integer(pid_t
, child_pid
, child
->tgid
)
558 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
559 ctf_integer(unsigned int, child_ns_inum
,
561 unsigned int child_ns_inum
= 0;
564 struct pid_namespace
*pid_ns
;
566 pid_ns
= task_active_pid_ns(child
);
569 pid_ns
->lttng_ns_inum
;
579 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
581 * Tracepoint for exec:
583 LTTNG_TRACEPOINT_EVENT(sched_process_exec
,
585 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
586 struct linux_binprm
*bprm
),
588 TP_ARGS(p
, old_pid
, bprm
),
591 ctf_string(filename
, bprm
->filename
)
592 ctf_integer(pid_t
, tid
, p
->pid
)
593 ctf_integer(pid_t
, old_tid
, old_pid
)
599 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
600 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
602 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template
,
604 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
609 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
610 ctf_integer(pid_t
, tid
, tsk
->pid
)
611 ctf_integer(u64
, delay
, delay
)
617 * Tracepoint for accounting wait time (time the task is runnable
618 * but not actually running due to scheduler contention).
620 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_wait
,
621 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
625 * Tracepoint for accounting sleep time (time the task is not runnable,
626 * including iowait, see below).
628 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_sleep
,
629 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
633 * Tracepoint for accounting iowait time (time the task is not runnable
634 * due to waiting on IO to complete).
636 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_iowait
,
637 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
640 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0))
642 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
644 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_blocked
,
645 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
650 * Tracepoint for accounting runtime (time the task is executing
653 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime
,
655 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
657 TP_ARGS(tsk
, runtime
, vruntime
),
660 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
661 ctf_integer(pid_t
, tid
, tsk
->pid
)
662 ctf_integer(u64
, runtime
, runtime
)
663 ctf_integer(u64
, vruntime
, vruntime
)
667 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0) || \
668 LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
669 LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
671 * Tracepoint for showing priority inheritance modifying a tasks
674 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
676 TP_PROTO(struct task_struct
*tsk
, struct task_struct
*pi_task
),
678 TP_ARGS(tsk
, pi_task
),
681 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
682 ctf_integer(pid_t
, tid
, tsk
->pid
)
683 ctf_integer(int, oldprio
, tsk
->prio
- MAX_RT_PRIO
)
684 ctf_integer(int, newprio
, pi_task
? pi_task
->prio
- MAX_RT_PRIO
: tsk
->prio
- MAX_RT_PRIO
)
689 * Tracepoint for showing priority inheritance modifying a tasks
692 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
694 TP_PROTO(struct task_struct
*tsk
, int newprio
),
696 TP_ARGS(tsk
, newprio
),
699 ctf_array_text(char, comm
, tsk
->comm
, TASK_COMM_LEN
)
700 ctf_integer(pid_t
, tid
, tsk
->pid
)
701 ctf_integer(int, oldprio
, tsk
->prio
- MAX_RT_PRIO
)
702 ctf_integer(int, newprio
, newprio
- MAX_RT_PRIO
)
707 #endif /* LTTNG_TRACE_SCHED_H */
709 /* This part must be outside protection */
710 #include <probes/define_trace.h>