2 #define TRACE_SYSTEM writeback
4 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_WRITEBACK_H
7 #include <linux/backing-dev.h>
8 #include <linux/writeback.h>
9 #include <linux/version.h>
11 #ifndef _TRACE_WRITEBACK_DEF_
12 #define _TRACE_WRITEBACK_DEF_
13 /* Have to duplicate it here from fs/fs-writeback.c */
14 struct wb_writeback_work
{
16 struct super_block
*sb
;
17 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
18 unsigned long *older_than_this
;
20 enum writeback_sync_modes sync_mode
;
21 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
22 unsigned int tagged_writepages
:1;
24 unsigned int for_kupdate
:1;
25 unsigned int range_cyclic
:1;
26 unsigned int for_background
:1;
27 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
28 enum wb_reason reason
; /* why was writeback initiated? */
31 struct list_head list
; /* pending work list */
32 struct completion
*done
; /* set if the caller waits */
34 static inline struct backing_dev_info
*inode_to_bdi(struct inode
*inode
)
36 struct super_block
*sb
= inode
->i_sb
;
38 if (strcmp(sb
->s_type
->name
, "bdev") == 0)
39 return inode
->i_mapping
->backing_dev_info
;
45 #define show_inode_state(state) \
46 __print_flags(state, "|", \
47 {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
48 {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
49 {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
51 {I_WILL_FREE, "I_WILL_FREE"}, \
52 {I_FREEING, "I_FREEING"}, \
53 {I_CLEAR, "I_CLEAR"}, \
55 {I_REFERENCED, "I_REFERENCED"} \
58 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
/*
 * Map enum wb_reason values to human-readable strings for trace output
 * (consumed via __print_symbolic() in the writeback trace events below).
 * NOTE(review): this table duplicates enum wb_reason from the 3.2+ kernel;
 * keep it in sync with <linux/writeback.h> when new reasons are added.
 */
#define WB_WORK_REASON							\
	{WB_REASON_BACKGROUND,		"background"},			\
	{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"},		\
	{WB_REASON_SYNC,		"sync"},			\
	{WB_REASON_PERIODIC,		"periodic"},			\
	{WB_REASON_LAPTOP_TIMER,	"laptop_timer"},		\
	{WB_REASON_FREE_MORE_MEM,	"free_more_memory"},		\
	{WB_REASON_FS_FREE_SPACE,	"fs_free_space"},		\
	{WB_REASON_FORKER_THREAD,	"forker_thread"}
70 DECLARE_EVENT_CLASS(writeback_work_class
,
71 TP_PROTO(struct backing_dev_info
*bdi
, struct wb_writeback_work
*work
),
74 __array(char, name
, 32)
75 __field(long, nr_pages
)
76 __field(dev_t
, sb_dev
)
77 __field(int, sync_mode
)
78 __field(int, for_kupdate
)
79 __field(int, range_cyclic
)
80 __field(int, for_background
)
81 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
86 tp_memcpy(name
, dev_name(bdi
->dev
? bdi
->dev
:
87 default_backing_dev_info
.dev
), 32)
88 tp_assign(nr_pages
, work
->nr_pages
)
89 tp_assign(sb_dev
, work
->sb
? work
->sb
->s_dev
: 0)
90 tp_assign(sync_mode
, work
->sync_mode
)
91 tp_assign(for_kupdate
, work
->for_kupdate
)
92 tp_assign(range_cyclic
, work
->range_cyclic
)
93 tp_assign(for_background
, work
->for_background
)
94 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
95 tp_assign(reason
, work
->reason
)
98 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
99 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
100 "kupdate=%d range_cyclic=%d background=%d reason=%s",
102 MAJOR(__entry
->sb_dev
), MINOR(__entry
->sb_dev
),
105 __entry
->for_kupdate
,
106 __entry
->range_cyclic
,
107 __entry
->for_background
,
108 __print_symbolic(__entry
->reason
, WB_WORK_REASON
)
111 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
112 "kupdate=%d range_cyclic=%d background=%d",
114 MAJOR(__entry
->sb_dev
), MINOR(__entry
->sb_dev
),
117 __entry
->for_kupdate
,
118 __entry
->range_cyclic
,
119 __entry
->for_background
123 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
124 DEFINE_EVENT(writeback_work_class, name, \
125 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
/*
 * Instantiate the writeback_work_class events present on all supported
 * kernels: queueing of a wb_writeback_work item and its execution path.
 */
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread)
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue)
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec)
130 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
/*
 * start/written/wait work events were only introduced upstream in
 * kernel 3.1, hence the surrounding KERNEL_VERSION guard.
 */
DEFINE_WRITEBACK_WORK_EVENT(writeback_start)
DEFINE_WRITEBACK_WORK_EVENT(writeback_written)
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait)
136 TRACE_EVENT(writeback_pages_written
,
137 TP_PROTO(long pages_written
),
138 TP_ARGS(pages_written
),
143 tp_assign(pages
, pages_written
)
145 TP_printk("%ld", __entry
->pages
)
148 DECLARE_EVENT_CLASS(writeback_class
,
149 TP_PROTO(struct backing_dev_info
*bdi
),
152 __array(char, name
, 32)
155 tp_memcpy(name
, dev_name(bdi
->dev
), 32)
161 #define DEFINE_WRITEBACK_EVENT(name) \
162 DEFINE_EVENT(writeback_class, name, \
163 TP_PROTO(struct backing_dev_info *bdi), \
/* Fired when the flusher thread wakes up but finds no work to do. */
DEFINE_WRITEBACK_EVENT(writeback_nowork)
167 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
/* Background writeback wakeup; event exists upstream since 2.6.38. */
DEFINE_WRITEBACK_EVENT(writeback_wake_background)
/*
 * Per-bdi flusher thread lifecycle events: wakeups, bdi
 * registration/unregistration, and thread start/stop.
 */
DEFINE_WRITEBACK_EVENT(writeback_wake_thread)
DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread)
DEFINE_WRITEBACK_EVENT(writeback_bdi_register)
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister)
DEFINE_WRITEBACK_EVENT(writeback_thread_start)
DEFINE_WRITEBACK_EVENT(writeback_thread_stop)
176 #if (LTTNG_KERNEL_RANGE(3,1,0, 3,2,0))
/*
 * balance_dirty_{start,wait} only existed as writeback_class events in the
 * 3.1 kernel; 3.2 replaced them (see the LTTNG_KERNEL_RANGE guard above).
 */
DEFINE_WRITEBACK_EVENT(balance_dirty_start)
DEFINE_WRITEBACK_EVENT(balance_dirty_wait)
180 TRACE_EVENT(balance_dirty_written
,
182 TP_PROTO(struct backing_dev_info
*bdi
, int written
),
184 TP_ARGS(bdi
, written
),
187 __array(char, name
, 32)
188 __field(int, written
)
192 tp_memcpy(name
, dev_name(bdi
->dev
), 32)
193 tp_assign(written
, written
)
196 TP_printk("bdi %s written %d",
203 DECLARE_EVENT_CLASS(wbc_class
,
204 TP_PROTO(struct writeback_control
*wbc
, struct backing_dev_info
*bdi
),
207 __array(char, name
, 32)
208 __field(long, nr_to_write
)
209 __field(long, pages_skipped
)
210 __field(int, sync_mode
)
211 __field(int, for_kupdate
)
212 __field(int, for_background
)
213 __field(int, for_reclaim
)
214 __field(int, range_cyclic
)
215 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
216 __field(int, more_io
)
217 __field(unsigned long, older_than_this
)
219 __field(long, range_start
)
220 __field(long, range_end
)
224 tp_memcpy(name
, dev_name(bdi
->dev
), 32)
225 tp_assign(nr_to_write
, wbc
->nr_to_write
)
226 tp_assign(pages_skipped
, wbc
->pages_skipped
)
227 tp_assign(sync_mode
, wbc
->sync_mode
)
228 tp_assign(for_kupdate
, wbc
->for_kupdate
)
229 tp_assign(for_background
, wbc
->for_background
)
230 tp_assign(for_reclaim
, wbc
->for_reclaim
)
231 tp_assign(range_cyclic
, wbc
->range_cyclic
)
232 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
233 tp_assign(more_io
, wbc
->more_io
)
234 tp_assign(older_than_this
, wbc
->older_than_this
?
235 *wbc
->older_than_this
: 0)
237 tp_assign(range_start
, (long)wbc
->range_start
)
238 tp_assign(range_end
, (long)wbc
->range_end
)
241 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
242 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
243 "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
245 "bgrd=%d reclm=%d cyclic=%d "
247 "start=0x%lx end=0x%lx",
249 __entry
->nr_to_write
,
250 __entry
->pages_skipped
,
252 __entry
->for_kupdate
,
253 __entry
->for_background
,
254 __entry
->for_reclaim
,
255 __entry
->range_cyclic
,
256 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
258 __entry
->older_than_this
,
260 __entry
->range_start
,
264 #define DEFINE_WBC_EVENT(name) \
265 DEFINE_EVENT(wbc_class, name, \
266 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
268 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
/*
 * writeback_control-based events that were removed upstream in 3.1
 * (superseded by the work-based and bdi_dirty_ratelimit events).
 */
DEFINE_WBC_EVENT(wbc_writeback_start)
DEFINE_WBC_EVENT(wbc_writeback_written)
DEFINE_WBC_EVENT(wbc_writeback_wait)
DEFINE_WBC_EVENT(wbc_balance_dirty_start)
DEFINE_WBC_EVENT(wbc_balance_dirty_written)
DEFINE_WBC_EVENT(wbc_balance_dirty_wait)
/* Traces each page handed to ->writepage(); present on all kernel versions. */
DEFINE_WBC_EVENT(wbc_writepage)
278 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
279 TRACE_EVENT(writeback_queue_io
,
280 TP_PROTO(struct bdi_writeback
*wb
,
281 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
282 struct wb_writeback_work
*work
,
284 unsigned long *older_than_this
,
287 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
288 TP_ARGS(wb
, work
, moved
),
290 TP_ARGS(wb
, older_than_this
, moved
),
293 __array(char, name
, 32)
294 __field(unsigned long, older
)
297 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
302 tp_memcpy(name
, dev_name(wb
->bdi
->dev
), 32)
303 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
305 work
->older_than_this
? *(work
->older_than_this
) : 0)
306 tp_assign(age
, work
->older_than_this
?
307 (jiffies
- *(work
->older_than_this
)) * 1000 / HZ
: -1)
309 tp_assign(older
, older_than_this
? *older_than_this
: 0)
310 tp_assign(age
, older_than_this
?
311 (jiffies
- *older_than_this
) * 1000 / HZ
: -1)
313 tp_assign(moved
, moved
)
314 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
315 tp_assign(reason
, work
->reason
)
318 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
319 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
321 __entry
->older
, /* older_than_this in jiffies */
322 __entry
->age
, /* older_than_this in relative milliseconds */
324 __print_symbolic(__entry
->reason
, WB_WORK_REASON
)
327 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
329 __entry
->older
, /* older_than_this in jiffies */
330 __entry
->age
, /* older_than_this in relative milliseconds */
336 TRACE_EVENT(global_dirty_state
,
338 TP_PROTO(unsigned long background_thresh
,
339 unsigned long dirty_thresh
342 TP_ARGS(background_thresh
,
347 __field(unsigned long, nr_dirty
)
348 __field(unsigned long, nr_writeback
)
349 __field(unsigned long, nr_unstable
)
350 __field(unsigned long, background_thresh
)
351 __field(unsigned long, dirty_thresh
)
352 __field(unsigned long, dirty_limit
)
353 __field(unsigned long, nr_dirtied
)
354 __field(unsigned long, nr_written
)
358 tp_assign(nr_dirty
, global_page_state(NR_FILE_DIRTY
))
359 tp_assign(nr_writeback
, global_page_state(NR_WRITEBACK
))
360 tp_assign(nr_unstable
, global_page_state(NR_UNSTABLE_NFS
))
361 tp_assign(nr_dirtied
, global_page_state(NR_DIRTIED
))
362 tp_assign(nr_written
, global_page_state(NR_WRITTEN
))
363 tp_assign(background_thresh
, background_thresh
)
364 tp_assign(dirty_thresh
, dirty_thresh
)
365 tp_assign(dirty_limit
, global_dirty_limit
)
368 TP_printk("dirty=%lu writeback=%lu unstable=%lu "
369 "bg_thresh=%lu thresh=%lu limit=%lu "
370 "dirtied=%lu written=%lu",
372 __entry
->nr_writeback
,
373 __entry
->nr_unstable
,
374 __entry
->background_thresh
,
375 __entry
->dirty_thresh
,
376 __entry
->dirty_limit
,
383 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
/*
 * Scale a page-granular quantity to kilobytes:
 * PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024). Used below to report
 * bandwidth/ratelimit fields (tracked in pages/s) as KB/s.
 */
#define KBps(x) ((x) << (PAGE_SHIFT - 10))
387 TRACE_EVENT(bdi_dirty_ratelimit
,
389 TP_PROTO(struct backing_dev_info
*bdi
,
390 unsigned long dirty_rate
,
391 unsigned long task_ratelimit
),
393 TP_ARGS(bdi
, dirty_rate
, task_ratelimit
),
396 __array(char, bdi
, 32)
397 __field(unsigned long, write_bw
)
398 __field(unsigned long, avg_write_bw
)
399 __field(unsigned long, dirty_rate
)
400 __field(unsigned long, dirty_ratelimit
)
401 __field(unsigned long, task_ratelimit
)
402 __field(unsigned long, balanced_dirty_ratelimit
)
406 tp_memcpy(bdi
, dev_name(bdi
->dev
), 32)
407 tp_assign(write_bw
, KBps(bdi
->write_bandwidth
))
408 tp_assign(avg_write_bw
, KBps(bdi
->avg_write_bandwidth
))
409 tp_assign(dirty_rate
, KBps(dirty_rate
))
410 tp_assign(dirty_ratelimit
, KBps(bdi
->dirty_ratelimit
))
411 tp_assign(task_ratelimit
, KBps(task_ratelimit
))
412 tp_assign(balanced_dirty_ratelimit
,
413 KBps(bdi
->balanced_dirty_ratelimit
))
417 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
418 "dirty_ratelimit=%lu task_ratelimit=%lu "
419 "balanced_dirty_ratelimit=%lu",
421 __entry
->write_bw
, /* write bandwidth */
422 __entry
->avg_write_bw
, /* avg write bandwidth */
423 __entry
->dirty_rate
, /* bdi dirty rate */
424 __entry
->dirty_ratelimit
, /* base ratelimit */
425 __entry
->task_ratelimit
, /* ratelimit with position control */
426 __entry
->balanced_dirty_ratelimit
/* the balanced ratelimit */
430 TRACE_EVENT(balance_dirty_pages
,
432 TP_PROTO(struct backing_dev_info
*bdi
,
433 unsigned long thresh
,
434 unsigned long bg_thresh
,
436 unsigned long bdi_thresh
,
437 unsigned long bdi_dirty
,
438 unsigned long dirty_ratelimit
,
439 unsigned long task_ratelimit
,
440 unsigned long dirtied
,
441 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
442 unsigned long period
,
445 unsigned long start_time
),
447 TP_ARGS(bdi
, thresh
, bg_thresh
, dirty
, bdi_thresh
, bdi_dirty
,
448 dirty_ratelimit
, task_ratelimit
,
449 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
450 dirtied
, period
, pause
, start_time
),
452 dirtied
, pause
, start_time
),
455 __array( char, bdi
, 32)
456 __field(unsigned long, limit
)
457 __field(unsigned long, setpoint
)
458 __field(unsigned long, dirty
)
459 __field(unsigned long, bdi_setpoint
)
460 __field(unsigned long, bdi_dirty
)
461 __field(unsigned long, dirty_ratelimit
)
462 __field(unsigned long, task_ratelimit
)
463 __field(unsigned int, dirtied
)
464 __field(unsigned int, dirtied_pause
)
465 __field(unsigned long, paused
)
466 __field( long, pause
)
467 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
468 __field(unsigned long, period
)
469 __field( long, think
)
474 tp_memcpy(bdi
, dev_name(bdi
->dev
), 32)
475 tp_assign(limit
, global_dirty_limit
)
477 (global_dirty_limit
+ (thresh
+ bg_thresh
) / 2) / 2)
478 tp_assign(dirty
, dirty
)
479 tp_assign(bdi_setpoint
,
480 ((global_dirty_limit
+ (thresh
+ bg_thresh
) / 2) / 2) *
481 bdi_thresh
/ (thresh
+ 1))
482 tp_assign(bdi_dirty
, bdi_dirty
)
483 tp_assign(dirty_ratelimit
, KBps(dirty_ratelimit
))
484 tp_assign(task_ratelimit
, KBps(task_ratelimit
))
485 tp_assign(dirtied
, dirtied
)
486 tp_assign(dirtied_pause
, current
->nr_dirtied_pause
)
487 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
488 tp_assign(think
, current
->dirty_paused_when
== 0 ? 0 :
489 (long)(jiffies
- current
->dirty_paused_when
) * 1000/HZ
)
490 tp_assign(period
, period
* 1000 / HZ
)
492 tp_assign(pause
, pause
* 1000 / HZ
)
493 tp_assign(paused
, (jiffies
- start_time
) * 1000 / HZ
)
497 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
499 "limit=%lu setpoint=%lu dirty=%lu "
500 "bdi_setpoint=%lu bdi_dirty=%lu "
501 "dirty_ratelimit=%lu task_ratelimit=%lu "
502 "dirtied=%u dirtied_pause=%u "
503 "paused=%lu pause=%ld period=%lu think=%ld",
508 __entry
->bdi_setpoint
,
510 __entry
->dirty_ratelimit
,
511 __entry
->task_ratelimit
,
513 __entry
->dirtied_pause
,
514 __entry
->paused
, /* ms */
515 __entry
->pause
, /* ms */
516 __entry
->period
, /* ms */
517 __entry
->think
/* ms */
521 "limit=%lu setpoint=%lu dirty=%lu "
522 "bdi_setpoint=%lu bdi_dirty=%lu "
523 "dirty_ratelimit=%lu task_ratelimit=%lu "
524 "dirtied=%u dirtied_pause=%u "
525 "paused=%lu pause=%ld",
530 __entry
->bdi_setpoint
,
532 __entry
->dirty_ratelimit
,
533 __entry
->task_ratelimit
,
535 __entry
->dirtied_pause
,
536 __entry
->paused
, /* ms */
537 __entry
->pause
/* ms */
543 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
544 TRACE_EVENT(writeback_sb_inodes_requeue
,
546 TP_PROTO(struct inode
*inode
),
550 __array(char, name
, 32)
551 __field(unsigned long, ino
)
552 __field(unsigned long, state
)
553 __field(unsigned long, dirtied_when
)
557 tp_memcpy(name
, dev_name(inode_to_bdi(inode
)->dev
), 32)
558 tp_assign(ino
, inode
->i_ino
)
559 tp_assign(state
, inode
->i_state
)
560 tp_assign(dirtied_when
, inode
->dirtied_when
)
563 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
566 show_inode_state(__entry
->state
),
567 __entry
->dirtied_when
,
568 (jiffies
- __entry
->dirtied_when
) / HZ
573 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
574 DECLARE_EVENT_CLASS(writeback_congest_waited_template
,
576 TP_PROTO(unsigned int usec_timeout
, unsigned int usec_delayed
),
578 TP_ARGS(usec_timeout
, usec_delayed
),
581 __field( unsigned int, usec_timeout
)
582 __field( unsigned int, usec_delayed
)
586 tp_assign(usec_timeout
, usec_timeout
)
587 tp_assign(usec_delayed
, usec_delayed
)
590 TP_printk("usec_timeout=%u usec_delayed=%u",
591 __entry
->usec_timeout
,
592 __entry
->usec_delayed
)
595 DEFINE_EVENT(writeback_congest_waited_template
, writeback_congestion_wait
,
597 TP_PROTO(unsigned int usec_timeout
, unsigned int usec_delayed
),
599 TP_ARGS(usec_timeout
, usec_delayed
)
602 DEFINE_EVENT(writeback_congest_waited_template
, writeback_wait_iff_congested
,
604 TP_PROTO(unsigned int usec_timeout
, unsigned int usec_delayed
),
606 TP_ARGS(usec_timeout
, usec_delayed
)
610 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
611 DECLARE_EVENT_CLASS(writeback_single_inode_template
,
613 TP_PROTO(struct inode
*inode
,
614 struct writeback_control
*wbc
,
615 unsigned long nr_to_write
618 TP_ARGS(inode
, wbc
, nr_to_write
),
621 __array(char, name
, 32)
622 __field(unsigned long, ino
)
623 __field(unsigned long, state
)
624 __field(unsigned long, dirtied_when
)
625 __field(unsigned long, writeback_index
)
626 __field(long, nr_to_write
)
627 __field(unsigned long, wrote
)
631 tp_memcpy(name
, dev_name(inode_to_bdi(inode
)->dev
), 32)
632 tp_assign(ino
, inode
->i_ino
)
633 tp_assign(state
, inode
->i_state
)
634 tp_assign(dirtied_when
, inode
->dirtied_when
)
635 tp_assign(writeback_index
, inode
->i_mapping
->writeback_index
)
636 tp_assign(nr_to_write
, nr_to_write
)
637 tp_assign(wrote
, nr_to_write
- wbc
->nr_to_write
)
640 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
641 "index=%lu to_write=%ld wrote=%lu",
644 show_inode_state(__entry
->state
),
645 __entry
->dirtied_when
,
646 (jiffies
- __entry
->dirtied_when
) / HZ
,
647 __entry
->writeback_index
,
648 __entry
->nr_to_write
,
653 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
654 DEFINE_EVENT(writeback_single_inode_template
, writeback_single_inode_requeue
,
655 TP_PROTO(struct inode
*inode
,
656 struct writeback_control
*wbc
,
657 unsigned long nr_to_write
),
658 TP_ARGS(inode
, wbc
, nr_to_write
)
662 DEFINE_EVENT(writeback_single_inode_template
, writeback_single_inode
,
663 TP_PROTO(struct inode
*inode
,
664 struct writeback_control
*wbc
,
665 unsigned long nr_to_write
),
666 TP_ARGS(inode
, wbc
, nr_to_write
)
670 #endif /* _TRACE_WRITEBACK_H */
672 /* This part must be outside protection */
673 #include "../../../probes/define_trace.h"