#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue

#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))

#ifndef _TRACE_WORKQUEUE_DEF_
#define _TRACE_WORKQUEUE_DEF_

/* Have to duplicate all these definitions from kernel/workqueue.c */
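
/*
 * These shadow definitions are never instantiated here; they only give the
 * probes below the field layout needed to chase pointers handed in by the
 * kernel. For example, workqueue_queue_work dereferences (a sketch, assuming
 * a >= 3.6 kernel):
 *
 *	cwq->pool->gcwq->cpu
 *
 * which only resolves correctly if these layouts match the running kernel,
 * hence the LINUX_VERSION_CODE checks that follow.
 */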
enum {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
#endif
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))

struct worker_pool {
	struct global_cwq	*gcwq;		/* I: the owning gcwq */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	struct mutex		assoc_mutex;	/* protect GCWQ_DISASSOCIATED */
#else
	struct mutex		manager_mutex;	/* mutex manager should hold */
#endif
	struct ida		worker_ida;	/* L: for worker IDs */
};

struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	/* workers are chained either in busy_hash or pool idle_list */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	struct worker_pool	pools[NR_WORKER_POOLS];
						/* normal and highpri pools */
#else
	struct worker_pool	pools[2];	/* normal and highpri pools */

	wait_queue_head_t	rebind_hold;	/* rebind hold wait */
#endif
} ____cacheline_aligned_in_smp;

#else

struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

#endif

struct cpu_workqueue_struct {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	struct worker_pool	*pool;		/* I: the associated pool */
#else
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
#endif
	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

#endif /* _TRACE_WORKQUEUE_DEF_ */

DECLARE_EVENT_CLASS(workqueue_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
	),

	TP_fast_assign(
		tp_assign(work, work)
	),

	TP_printk("work struct %p", __entry->work)
)
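
/*
 * Events defined below with DEFINE_EVENT(workqueue_work, ...)
 * (workqueue_activate_work and workqueue_execute_end) reuse this class
 * layout: a single pointer-sized "work" field identifying the work item.
 */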

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))

/**
 * workqueue_queue_work - called when a work gets queued
 * @req_cpu:	the requested cpu
 * @cwq:	pointer to struct cpu_workqueue_struct
 * @work:	pointer to struct work_struct
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued on a workqueue (i.e. once the delay
 * has been reached).
 */
TRACE_EVENT(workqueue_queue_work,

	TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
		 struct work_struct *work),

	TP_ARGS(req_cpu, cwq, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
		__field( void *, workqueue )
		__field( unsigned int, req_cpu )
		__field( unsigned int, cpu )
	),

	TP_fast_assign(
		tp_assign(work, work)
		tp_assign(function, work->func)
		tp_assign(workqueue, cwq->wq)
		tp_assign(req_cpu, req_cpu)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		tp_assign(cpu, cwq->pool->gcwq->cpu)
#else
		tp_assign(cpu, cwq->gcwq->cpu)
#endif
	),

	TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
		  __entry->work, __entry->function, __entry->workqueue,
		  __entry->req_cpu, __entry->cpu)
)
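
/*
 * Mainline emits this tracepoint from __queue_work() in kernel/workqueue.c,
 * i.e. after the target cwq has been chosen, so "cpu" can differ from
 * "req_cpu" (e.g. for WORK_CPU_UNBOUND requests). A minimal consumer sketch,
 * assuming the generated tracepoint registration helpers:
 *
 *	static void probe_queue_work(void *data, unsigned int req_cpu,
 *				     struct cpu_workqueue_struct *cwq,
 *				     struct work_struct *work)
 *	{
 *		pr_debug("queued %pf on %p\n", work->func, cwq->wq);
 *	}
 *
 *	register_trace_workqueue_queue_work(probe_queue_work, NULL);
 */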

/**
 * workqueue_activate_work - called when a work gets activated
 * @work:	pointer to struct work_struct
 *
 * This event occurs when a queued work is put on the active queue,
 * which happens immediately after queueing unless @max_active limit
 * is reached.
 */
DEFINE_EVENT(workqueue_work, workqueue_activate_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
)
#endif

/**
 * workqueue_execute_start - called immediately before the workqueue callback
 * @work:	pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
TRACE_EVENT(workqueue_execute_start,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),

	TP_fast_assign(
		tp_assign(work, work)
		tp_assign(function, work->func)
	),

	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
)

/**
 * workqueue_execute_end - called immediately after the workqueue callback
 * @work:	pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
DEFINE_EVENT(workqueue_work, workqueue_execute_end,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
)
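
/*
 * workqueue_execute_start/end bracket each invocation of the work function,
 * so pairing the two records that share the same "work" pointer yields the
 * per-item execution time (a post-processing sketch, not part of this
 * header):
 *
 *	latency = end_event.timestamp - start_event.timestamp
 */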

#else

DECLARE_EVENT_CLASS(workqueue,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work),

	TP_STRUCT__entry(
		__array(char,		thread_comm,	TASK_COMM_LEN)
		__field(pid_t,		thread_pid)
		__field(work_func_t,	func)
	),

	TP_fast_assign(
		tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
		tp_assign(thread_pid, wq_thread->pid)
		tp_assign(func, work->func)
	),

	TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
		__entry->thread_pid, __entry->func)
)
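
/*
 * On these older kernels (< 2.6.36, before the cmwq rework) each workqueue
 * still had dedicated worker threads, so the events below identify work by
 * the servicing kthread (comm/pid) and the work function rather than by a
 * work struct pointer.
 */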

DEFINE_EVENT(workqueue, workqueue_insertion,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work)
)

DEFINE_EVENT(workqueue, workqueue_execution,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work)
)

/* Trace the creation of one workqueue thread on a cpu */
TRACE_EVENT(workqueue_creation,

	TP_PROTO(struct task_struct *wq_thread, int cpu),

	TP_ARGS(wq_thread, cpu),

	TP_STRUCT__entry(
		__array(char,	thread_comm,	TASK_COMM_LEN)
		__field(pid_t,	thread_pid)
		__field(int,	cpu)
	),

	TP_fast_assign(
		tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
		tp_assign(thread_pid, wq_thread->pid)
		tp_assign(cpu, cpu)
	),

	TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
		__entry->thread_pid, __entry->cpu)
)

TRACE_EVENT(workqueue_destruction,

	TP_PROTO(struct task_struct *wq_thread),

	TP_ARGS(wq_thread),

	TP_STRUCT__entry(
		__array(char,	thread_comm,	TASK_COMM_LEN)
		__field(pid_t,	thread_pid)
	),

	TP_fast_assign(
		tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
		tp_assign(thread_pid, wq_thread->pid)
	),

	TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
)

#endif

#endif /* _TRACE_WORKQUEUE_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"