X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=instrumentation%2Fevents%2Flttng-module%2Fsched.h;h=415e954b02ae3c64bf34acf3f980eb5e85a0d005;hb=360d3efee55154a55c18be32175bee608dcb21df;hp=f5c36692491ecbc8c7a65e2226e526085b94803e;hpb=f144f72a82816c2a4fd911108b1ec7a89c20b517;p=lttng-modules.git

diff --git a/instrumentation/events/lttng-module/sched.h b/instrumentation/events/lttng-module/sched.h
index f5c36692..415e954b 100644
--- a/instrumentation/events/lttng-module/sched.h
+++ b/instrumentation/events/lttng-module/sched.h
@@ -8,12 +8,12 @@
 #include
 #include
 #include
-#include
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+#include
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
 #include
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0))
 #define lttng_proc_inum ns.inum
 #else
 #define lttng_proc_inum proc_inum
@@ -24,7 +24,63 @@
 #ifndef _TRACE_SCHED_DEF_
 #define _TRACE_SCHED_DEF_
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+	unsigned int state;
+
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
+	 */
+	if (preempt)
+		return TASK_REPORT_MAX;
+
+	/*
+	 * task_state_index() uses fls() and returns a value from 0-8 range.
+	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
+	 * it for left shift operation to get the correct task->state
+	 * mapping.
+	 */
+	state = task_state_index(p);
+
+	return state ? (1 << (state - 1)) : state;
+}
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+	unsigned int state;
+
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
+	 */
+	if (preempt)
+		return TASK_REPORT_MAX;
+
+	/*
+	 * __get_task_state() uses fls() and returns a value from 0-8 range.
+	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
+	 * it for left shift operation to get the correct task->state
+	 * mapping.
+	 */
+	state = __get_task_state(p);
+
+	return state ? (1 << (state - 1)) : state;
+}
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
 
 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
@@ -38,7 +94,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
 }
 
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
@@ -58,7 +114,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
 	return state;
 }
 
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,13,0))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
@@ -75,7 +131,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
 	return state;
 }
 
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,2,0))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
@@ -92,7 +148,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
 	return state;
 }
 
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
@@ -145,7 +201,7 @@ LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
 /*
  * Tracepoint for waking up a task:
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
 	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
 	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
 	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
@@ -166,10 +222,10 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 		ctf_integer(int, target_cpu, task_cpu(p))
 	)
 )
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
 
 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35))
 	TP_PROTO(struct task_struct *p, int success),
 
 	TP_ARGS(p, success),
@@ -184,14 +240,14 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 		ctf_integer(pid_t, tid, p->pid)
 		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
 		ctf_integer(int, success, success)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,32))
 		ctf_integer(int, target_cpu, task_cpu(p))
 #endif
 	)
 )
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
 	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
 	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
 	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
@@ -223,7 +279,7 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
 	     TP_PROTO(struct task_struct *p),
 	     TP_ARGS(p))
 
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35))
 
 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
 	     TP_PROTO(struct task_struct *p, int success),
@@ -236,7 +292,7 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
 	     TP_PROTO(struct task_struct *p, int success),
 	     TP_ARGS(p, success))
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35)) */
 
 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
 	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
@@ -249,38 +305,38 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
 	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
 	     TP_ARGS(rq, p, success))
 
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
+#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35)) */
 
 /*
  * Tracepoint for task switches, performed by the scheduler:
  */
 LTTNG_TRACEPOINT_EVENT(sched_switch,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
 	TP_PROTO(bool preempt,
 		 struct task_struct *prev,
 		 struct task_struct *next),
 
 	TP_ARGS(preempt, prev, next),
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35))
 	TP_PROTO(struct task_struct *prev,
 		 struct task_struct *next),
 
 	TP_ARGS(prev, next),
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35)) */
 	TP_PROTO(struct rq *rq, struct task_struct *prev,
 		 struct task_struct *next),
 
 	TP_ARGS(rq, prev, next),
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
+#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35)) */
 
 	TP_FIELDS(
 		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
 		ctf_integer(pid_t, prev_tid, prev->pid)
 		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
 		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35))
 		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
 #else
 		ctf_integer(long, prev_state, prev->state)
@@ -340,15 +396,15 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
 /*
  * Tracepoint for waiting on task to unschedule:
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35))
 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
 	TP_PROTO(struct task_struct *p),
 	TP_ARGS(p))
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35)) */
 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
 	TP_PROTO(struct rq *rq, struct task_struct *p),
 	TP_ARGS(rq, p))
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
+#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,35)) */
 
 /*
  * Tracepoint for a waiting task:
@@ -403,7 +459,7 @@ LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,
 		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
 		ctf_integer(pid_t, parent_tid, parent->pid)
 		ctf_integer(pid_t, parent_pid, parent->tgid)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
 		ctf_integer(unsigned int, parent_ns_inum,
 			({
 				unsigned int parent_ns_inum = 0;
@@ -423,7 +479,7 @@ LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,
 		ctf_integer(pid_t, child_tid, child->pid)
 		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
 		ctf_integer(pid_t, child_pid, child->tgid)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
 		ctf_integer(unsigned int, child_ns_inum,
 			({
 				unsigned int child_ns_inum = 0;
@@ -444,7 +500,7 @@ LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,
 	TP_code_post()
 )
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
+#if (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(2,6,33))
 /*
  * Tracepoint for sending a signal:
  */
@@ -462,7 +518,7 @@ LTTNG_TRACEPOINT_EVENT(sched_signal_send,
 )
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
 /*
  * Tracepoint for exec:
 */
@@ -481,7 +537,7 @@ LTTNG_TRACEPOINT_EVENT(sched_process_exec,
 )
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,32))
 /*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
@@ -524,7 +580,7 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay))
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0))
 /*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
@@ -552,7 +608,7 @@ LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,
 )
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0) || \
 	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
 	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
 /*
@@ -572,7 +628,7 @@ LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
 		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
 	)
 )
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,37))
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.
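
For reference only, a minimal userspace sketch (not part of the patch above, and using only stand-in names) of the arithmetic introduced in the >= 4.14 branches: it shows how the 0-8 index returned by task_state_index()/__get_task_state() is mapped back to a single-bit prev_state value, with index 0 (TASK_RUNNING) left untouched. Preempted tasks bypass this mapping and are reported as TASK_REPORT_MAX, as the patch comments note.

/*
 * Illustrative sketch only; it reproduces the expression
 * "state ? (1 << (state - 1)) : state" from the >= 4.14 branches above.
 */
#include <stdio.h>

/* Index 0 (TASK_RUNNING) stays 0; index N becomes bit N-1. */
static long index_to_prev_state(unsigned int index)
{
	return index ? (1L << (index - 1)) : index;
}

int main(void)
{
	unsigned int i;

	/* Indices 0..8 cover the range described in the patch comments. */
	for (i = 0; i <= 8; i++)
		printf("index %u -> prev_state 0x%03lx\n", i, index_to_prev_state(i));
	return 0;
}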