Add new kernel probes instrumentation
author Andrew Gabbasov <andrew_gabbasov@mentor.com>
Mon, 10 Dec 2012 16:14:52 +0000 (11:14 -0500)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Mon, 10 Dec 2012 16:14:52 +0000 (11:14 -0500)
Add kernel probes for btrfs, compaction, ext4, printk, random, rcu,
regmap, rpm, sunrpc, workqueue, writeback.

Signed-off-by: Andrew Gabbasov <andrew_gabbasov@mentor.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
34 files changed:
instrumentation/events/lttng-module/btrfs.h [new file with mode: 0644]
instrumentation/events/lttng-module/compaction.h [new file with mode: 0644]
instrumentation/events/lttng-module/ext4.h [new file with mode: 0644]
instrumentation/events/lttng-module/printk.h [new file with mode: 0644]
instrumentation/events/lttng-module/random.h [new file with mode: 0644]
instrumentation/events/lttng-module/rcu.h [new file with mode: 0644]
instrumentation/events/lttng-module/regmap.h [new file with mode: 0644]
instrumentation/events/lttng-module/rpm.h [new file with mode: 0644]
instrumentation/events/lttng-module/sunrpc.h [new file with mode: 0644]
instrumentation/events/lttng-module/workqueue.h [new file with mode: 0644]
instrumentation/events/lttng-module/writeback.h [new file with mode: 0644]
instrumentation/events/mainline/btrfs.h [new file with mode: 0644]
instrumentation/events/mainline/compaction.h [new file with mode: 0644]
instrumentation/events/mainline/ext4.h [new file with mode: 0644]
instrumentation/events/mainline/printk.h [new file with mode: 0644]
instrumentation/events/mainline/random.h [new file with mode: 0644]
instrumentation/events/mainline/rcu.h [new file with mode: 0644]
instrumentation/events/mainline/regmap.h [new file with mode: 0644]
instrumentation/events/mainline/rpm.h [new file with mode: 0644]
instrumentation/events/mainline/sunrpc.h [new file with mode: 0644]
instrumentation/events/mainline/workqueue.h [new file with mode: 0644]
instrumentation/events/mainline/writeback.h [new file with mode: 0644]
probes/Makefile
probes/lttng-probe-btrfs.c [new file with mode: 0644]
probes/lttng-probe-compaction.c [new file with mode: 0644]
probes/lttng-probe-ext4.c [new file with mode: 0644]
probes/lttng-probe-printk.c [new file with mode: 0644]
probes/lttng-probe-random.c [new file with mode: 0644]
probes/lttng-probe-rcu.c [new file with mode: 0644]
probes/lttng-probe-regmap.c [new file with mode: 0644]
probes/lttng-probe-rpm.c [new file with mode: 0644]
probes/lttng-probe-sunrpc.c [new file with mode: 0644]
probes/lttng-probe-workqueue.c [new file with mode: 0644]
probes/lttng-probe-writeback.c [new file with mode: 0644]

diff --git a/instrumentation/events/lttng-module/btrfs.h b/instrumentation/events/lttng-module/btrfs.h
new file mode 100644 (file)
index 0000000..b0ff3c6
--- /dev/null
@@ -0,0 +1,1005 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM btrfs
+
+#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BTRFS_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
+#include <linux/version.h>
+
+#ifndef _TRACE_BTRFS_DEF_
+#define _TRACE_BTRFS_DEF_
+struct btrfs_root;
+struct btrfs_fs_info;
+struct btrfs_inode;
+struct extent_map;
+struct btrfs_ordered_extent;
+struct btrfs_delayed_ref_node;
+struct btrfs_delayed_tree_ref;
+struct btrfs_delayed_data_ref;
+struct btrfs_delayed_ref_head;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+struct btrfs_block_group_cache;
+struct btrfs_free_cluster;
+#endif
+struct map_lookup;
+struct extent_buffer;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+struct extent_state;
+#endif
+#endif
+
+#define show_ref_type(type)                                            \
+       __print_symbolic(type,                                          \
+               { BTRFS_TREE_BLOCK_REF_KEY,     "TREE_BLOCK_REF" },     \
+               { BTRFS_EXTENT_DATA_REF_KEY,    "EXTENT_DATA_REF" },    \
+               { BTRFS_EXTENT_REF_V0_KEY,      "EXTENT_REF_V0" },      \
+               { BTRFS_SHARED_BLOCK_REF_KEY,   "SHARED_BLOCK_REF" },   \
+               { BTRFS_SHARED_DATA_REF_KEY,    "SHARED_DATA_REF" })
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
+#define __show_root_type(obj)                                          \
+       __print_symbolic_u64(obj,                                       \
+               { BTRFS_ROOT_TREE_OBJECTID,     "ROOT_TREE"     },      \
+               { BTRFS_EXTENT_TREE_OBJECTID,   "EXTENT_TREE"   },      \
+               { BTRFS_CHUNK_TREE_OBJECTID,    "CHUNK_TREE"    },      \
+               { BTRFS_DEV_TREE_OBJECTID,      "DEV_TREE"      },      \
+               { BTRFS_FS_TREE_OBJECTID,       "FS_TREE"       },      \
+               { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" },      \
+               { BTRFS_CSUM_TREE_OBJECTID,     "CSUM_TREE"     },      \
+               { BTRFS_TREE_LOG_OBJECTID,      "TREE_LOG"      },      \
+               { BTRFS_TREE_RELOC_OBJECTID,    "TREE_RELOC"    },      \
+               { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
+#else
+#define __show_root_type(obj)                                          \
+       __print_symbolic(obj,                                   \
+               { BTRFS_ROOT_TREE_OBJECTID,     "ROOT_TREE"     },      \
+               { BTRFS_EXTENT_TREE_OBJECTID,   "EXTENT_TREE"   },      \
+               { BTRFS_CHUNK_TREE_OBJECTID,    "CHUNK_TREE"    },      \
+               { BTRFS_DEV_TREE_OBJECTID,      "DEV_TREE"      },      \
+               { BTRFS_FS_TREE_OBJECTID,       "FS_TREE"       },      \
+               { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" },      \
+               { BTRFS_CSUM_TREE_OBJECTID,     "CSUM_TREE"     },      \
+               { BTRFS_TREE_LOG_OBJECTID,      "TREE_LOG"      },      \
+               { BTRFS_TREE_RELOC_OBJECTID,    "TREE_RELOC"    },      \
+               { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
+#endif
+
+#define show_root_type(obj)                                            \
+       obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) ||                \
+             (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+
+#define BTRFS_GROUP_FLAGS      \
+       { BTRFS_BLOCK_GROUP_DATA,       "DATA"}, \
+       { BTRFS_BLOCK_GROUP_SYSTEM,     "SYSTEM"}, \
+       { BTRFS_BLOCK_GROUP_METADATA,   "METADATA"}, \
+       { BTRFS_BLOCK_GROUP_RAID0,      "RAID0"}, \
+       { BTRFS_BLOCK_GROUP_RAID1,      "RAID1"}, \
+       { BTRFS_BLOCK_GROUP_DUP,        "DUP"}, \
+       { BTRFS_BLOCK_GROUP_RAID10,     "RAID10"}
+
+#define BTRFS_UUID_SIZE 16
+
+#endif
+
+TRACE_EVENT(btrfs_transaction_commit,
+
+       TP_PROTO(struct btrfs_root *root),
+
+       TP_ARGS(root),
+
+       TP_STRUCT__entry(
+               __field(        u64,  generation                )
+               __field(        u64,  root_objectid             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(generation, root->fs_info->generation)
+               tp_assign(root_objectid, root->root_key.objectid)
+       ),
+
+       TP_printk("root = %llu(%s), gen = %llu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->generation)
+)
+
+DECLARE_EVENT_CLASS(btrfs__inode,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino                     )
+               __field(        blkcnt_t,  blocks               )
+               __field(        u64,  disk_i_size               )
+               __field(        u64,  generation                )
+               __field(        u64,  last_trans                )
+               __field(        u64,  logged_trans              )
+               __field(        u64,  root_objectid             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(ino, inode->i_ino)
+               tp_assign(blocks, inode->i_blocks)
+               tp_assign(disk_i_size, BTRFS_I(inode)->disk_i_size)
+               tp_assign(generation, BTRFS_I(inode)->generation)
+               tp_assign(last_trans, BTRFS_I(inode)->last_trans)
+               tp_assign(logged_trans, BTRFS_I(inode)->logged_trans)
+               tp_assign(root_objectid,
+                               BTRFS_I(inode)->root->root_key.objectid)
+       ),
+
+       TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
+                 "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->generation,
+                 (unsigned long)__entry->ino,
+                 (unsigned long long)__entry->blocks,
+                 (unsigned long long)__entry->disk_i_size,
+                 (unsigned long long)__entry->last_trans,
+                 (unsigned long long)__entry->logged_trans)
+)
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+)
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+)
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
+#define __show_map_type(type)                                          \
+       __print_symbolic_u64(type,                                      \
+               { EXTENT_MAP_LAST_BYTE, "LAST_BYTE"     },              \
+               { EXTENT_MAP_HOLE,      "HOLE"          },              \
+               { EXTENT_MAP_INLINE,    "INLINE"        },              \
+               { EXTENT_MAP_DELALLOC,  "DELALLOC"      })
+#else
+#define __show_map_type(type)                                          \
+       __print_symbolic(type,                                  \
+               { EXTENT_MAP_LAST_BYTE, "LAST_BYTE"     },              \
+               { EXTENT_MAP_HOLE,      "HOLE"          },              \
+               { EXTENT_MAP_INLINE,    "INLINE"        },              \
+               { EXTENT_MAP_DELALLOC,  "DELALLOC"      })
+#endif
+
+#define show_map_type(type)                    \
+       type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" :  __show_map_type(type)
+
+#define show_map_flags(flag)                                           \
+       __print_flags(flag, "|",                                        \
+               { EXTENT_FLAG_PINNED,           "PINNED"        },      \
+               { EXTENT_FLAG_COMPRESSED,       "COMPRESSED"    },      \
+               { EXTENT_FLAG_VACANCY,          "VACANCY"       },      \
+               { EXTENT_FLAG_PREALLOC,         "PREALLOC"      })
+
+TRACE_EVENT(btrfs_get_extent,
+
+       TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+
+       TP_ARGS(root, map),
+
+       TP_STRUCT__entry(
+               __field(        u64,  root_objectid     )
+               __field(        u64,  start             )
+               __field(        u64,  len               )
+               __field(        u64,  orig_start        )
+               __field(        u64,  block_start       )
+               __field(        u64,  block_len         )
+               __field(        unsigned long,  flags   )
+               __field(        int,  refs              )
+               __field(        unsigned int,  compress_type    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(root_objectid, root->root_key.objectid)
+               tp_assign(start, map->start)
+               tp_assign(len, map->len)
+               tp_assign(orig_start, map->orig_start)
+               tp_assign(block_start, map->block_start)
+               tp_assign(block_len, map->block_len)
+               tp_assign(flags, map->flags)
+               tp_assign(refs, atomic_read(&map->refs))
+               tp_assign(compress_type, map->compress_type)
+       ),
+
+       TP_printk("root = %llu(%s), start = %llu, len = %llu, "
+                 "orig_start = %llu, block_start = %llu(%s), "
+                 "block_len = %llu, flags = %s, refs = %u, "
+                 "compress_type = %u",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->len,
+                 (unsigned long long)__entry->orig_start,
+                 show_map_type(__entry->block_start),
+                 (unsigned long long)__entry->block_len,
+                 show_map_flags(__entry->flags),
+                 __entry->refs, __entry->compress_type)
+)
+
+#define show_ordered_flags(flags)                                      \
+       __print_symbolic(flags,                                 \
+               { BTRFS_ORDERED_IO_DONE,        "IO_DONE"       },      \
+               { BTRFS_ORDERED_COMPLETE,       "COMPLETE"      },      \
+               { BTRFS_ORDERED_NOCOW,          "NOCOW"         },      \
+               { BTRFS_ORDERED_COMPRESSED,     "COMPRESSED"    },      \
+               { BTRFS_ORDERED_PREALLOC,       "PREALLOC"      },      \
+               { BTRFS_ORDERED_DIRECT,         "DIRECT"        })
+
+DECLARE_EVENT_CLASS(btrfs__ordered_extent,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino             )
+               __field(        u64,  file_offset       )
+               __field(        u64,  start             )
+               __field(        u64,  len               )
+               __field(        u64,  disk_len          )
+               __field(        u64,  bytes_left        )
+               __field(        unsigned long,  flags   )
+               __field(        int,  compress_type     )
+               __field(        int,  refs              )
+               __field(        u64,  root_objectid     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(ino, inode->i_ino)
+               tp_assign(file_offset, ordered->file_offset)
+               tp_assign(start, ordered->start)
+               tp_assign(len, ordered->len)
+               tp_assign(disk_len, ordered->disk_len)
+               tp_assign(bytes_left, ordered->bytes_left)
+               tp_assign(flags, ordered->flags)
+               tp_assign(compress_type, ordered->compress_type)
+               tp_assign(refs, atomic_read(&ordered->refs))
+               tp_assign(root_objectid,
+                               BTRFS_I(inode)->root->root_key.objectid)
+       ),
+
+       TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
+                 "start = %llu, len = %llu, disk_len = %llu, "
+                 "bytes_left = %llu, flags = %s, compress_type = %d, "
+                 "refs = %d",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->ino,
+                 (unsigned long long)__entry->file_offset,
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->len,
+                 (unsigned long long)__entry->disk_len,
+                 (unsigned long long)__entry->bytes_left,
+                 show_ordered_flags(__entry->flags),
+                 __entry->compress_type, __entry->refs)
+)
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+)
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+)
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+)
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+)
+
+DECLARE_EVENT_CLASS(btrfs__writepage,
+
+       TP_PROTO(struct page *page, struct inode *inode,
+                struct writeback_control *wbc),
+
+       TP_ARGS(page, inode, wbc),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t,  index                 )
+               __field(        long,   nr_to_write             )
+               __field(        long,   pages_skipped           )
+               __field(        loff_t, range_start             )
+               __field(        loff_t, range_end               )
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               __field(        char,   nonblocking             )
+#endif
+               __field(        char,   for_kupdate             )
+               __field(        char,   for_reclaim             )
+               __field(        char,   range_cyclic            )
+               __field(        pgoff_t,  writeback_index       )
+               __field(        u64,    root_objectid           )
+       ),
+
+       TP_fast_assign(
+               tp_assign(ino, inode->i_ino)
+               tp_assign(index, page->index)
+               tp_assign(nr_to_write, wbc->nr_to_write)
+               tp_assign(pages_skipped, wbc->pages_skipped)
+               tp_assign(range_start, wbc->range_start)
+               tp_assign(range_end, wbc->range_end)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               tp_assign(nonblocking, wbc->nonblocking)
+#endif
+               tp_assign(for_kupdate, wbc->for_kupdate)
+               tp_assign(for_reclaim, wbc->for_reclaim)
+               tp_assign(range_cyclic, wbc->range_cyclic)
+               tp_assign(writeback_index, inode->i_mapping->writeback_index)
+               tp_assign(root_objectid,
+                                BTRFS_I(inode)->root->root_key.objectid)
+       ),
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+       TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
+                 "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
+                 "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
+                 "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, __entry->index,
+                 __entry->nr_to_write, __entry->pages_skipped,
+                 __entry->range_start, __entry->range_end,
+                 __entry->nonblocking, __entry->for_kupdate,
+                 __entry->for_reclaim, __entry->range_cyclic,
+                 (unsigned long)__entry->writeback_index)
+#else
+       TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
+                 "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
+                 "range_end = %llu, for_kupdate = %d, "
+                 "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, __entry->index,
+                 __entry->nr_to_write, __entry->pages_skipped,
+                 __entry->range_start, __entry->range_end,
+                 __entry->for_kupdate,
+                 __entry->for_reclaim, __entry->range_cyclic,
+                 (unsigned long)__entry->writeback_index)
+#endif
+)
+
+DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+
+       TP_PROTO(struct page *page, struct inode *inode,
+                struct writeback_control *wbc),
+
+       TP_ARGS(page, inode, wbc)
+)
+
+TRACE_EVENT(btrfs_writepage_end_io_hook,
+
+       TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
+
+       TP_ARGS(page, start, end, uptodate),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,   ino            )
+               __field(        pgoff_t, index          )
+               __field(        u64,     start          )
+               __field(        u64,     end            )
+               __field(        int,     uptodate       )
+               __field(        u64,    root_objectid   )
+       ),
+
+       TP_fast_assign(
+               tp_assign(ino, page->mapping->host->i_ino)
+               tp_assign(index, page->index)
+               tp_assign(start, start)
+               tp_assign(end, end)
+               tp_assign(uptodate, uptodate)
+               tp_assign(root_objectid,
+                        BTRFS_I(page->mapping->host)->root->root_key.objectid)
+       ),
+
+       TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
+                 "end = %llu, uptodate = %d",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, (unsigned long)__entry->index,
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->end, __entry->uptodate)
+)
+
+TRACE_EVENT(btrfs_sync_file,
+
+       TP_PROTO(struct file *file, int datasync),
+
+       TP_ARGS(file, datasync),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino             )
+               __field(        ino_t,  parent          )
+               __field(        int,    datasync        )
+               __field(        u64,    root_objectid   )
+       ),
+
+       TP_fast_assign(
+               tp_assign(ino, file->f_path.dentry->d_inode->i_ino)
+               tp_assign(parent, file->f_path.dentry->d_parent->d_inode->i_ino)
+               tp_assign(datasync, datasync)
+               tp_assign(root_objectid,
+                       BTRFS_I(file->f_path.dentry->d_inode)->root->root_key.objectid)
+       ),
+
+       TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, (unsigned long)__entry->parent,
+                 __entry->datasync)
+)
+
+TRACE_EVENT(btrfs_sync_fs,
+
+       TP_PROTO(int wait),
+
+       TP_ARGS(wait),
+
+       TP_STRUCT__entry(
+               __field(        int,  wait              )
+       ),
+
+       TP_fast_assign(
+               tp_assign(wait, wait)
+       ),
+
+       TP_printk("wait = %d", __entry->wait)
+)
+
+#define show_ref_action(action)                                                \
+       __print_symbolic(action,                                        \
+               { BTRFS_ADD_DELAYED_REF,    "ADD_DELAYED_REF" },        \
+               { BTRFS_DROP_DELAYED_REF,   "DROP_DELAYED_REF" },       \
+               { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" },     \
+               { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
+                       
+
+TRACE_EVENT(btrfs_delayed_tree_ref,
+
+       TP_PROTO(struct btrfs_delayed_ref_node *ref,
+                struct btrfs_delayed_tree_ref *full_ref,
+                int action),
+
+       TP_ARGS(ref, full_ref, action),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+               __field(        int,  action            ) 
+               __field(        u64,  parent            )
+               __field(        u64,  ref_root          )
+               __field(        int,  level             )
+               __field(        int,  type              )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+               __field(        u64,  seq               )
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_assign(bytenr, ref->bytenr)
+               tp_assign(num_bytes, ref->num_bytes)
+               tp_assign(action, action)
+               tp_assign(parent, full_ref->parent)
+               tp_assign(ref_root, full_ref->root)
+               tp_assign(level, full_ref->level)
+               tp_assign(type, ref->type)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+               tp_assign(seq, ref->seq)
+#endif
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+                 "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
+                 "type = %s, seq = %llu",
+#else
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+                 "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
+                 "type = %s",
+#endif
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes,
+                 show_ref_action(__entry->action),
+                 show_root_type(__entry->parent),
+                 show_root_type(__entry->ref_root),
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+                 __entry->level, show_ref_type(__entry->type),
+                 (unsigned long long)__entry->seq)
+#else
+                 __entry->level, show_ref_type(__entry->type))
+#endif
+)
+
+TRACE_EVENT(btrfs_delayed_data_ref,
+
+       TP_PROTO(struct btrfs_delayed_ref_node *ref,
+                struct btrfs_delayed_data_ref *full_ref,
+                int action),
+
+       TP_ARGS(ref, full_ref, action),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+               __field(        int,  action            ) 
+               __field(        u64,  parent            )
+               __field(        u64,  ref_root          )
+               __field(        u64,  owner             )
+               __field(        u64,  offset            )
+               __field(        int,  type              )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+               __field(        u64,  seq               )
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_assign(bytenr, ref->bytenr)
+               tp_assign(num_bytes, ref->num_bytes)
+               tp_assign(action, action)
+               tp_assign(parent, full_ref->parent)
+               tp_assign(ref_root, full_ref->root)
+               tp_assign(owner, full_ref->objectid)
+               tp_assign(offset, full_ref->offset)
+               tp_assign(type, ref->type)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+               tp_assign(seq, ref->seq)
+#endif
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+                 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
+                 "offset = %llu, type = %s, seq = %llu",
+#else
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+                 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
+                 "offset = %llu, type = %s",
+#endif
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes,
+                 show_ref_action(__entry->action),
+                 show_root_type(__entry->parent),
+                 show_root_type(__entry->ref_root),
+                 (unsigned long long)__entry->owner,
+                 (unsigned long long)__entry->offset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+                 show_ref_type(__entry->type),
+                 (unsigned long long)__entry->seq)
+#else
+                 show_ref_type(__entry->type))
+#endif
+)
+
+TRACE_EVENT(btrfs_delayed_ref_head,
+
+       TP_PROTO(struct btrfs_delayed_ref_node *ref,
+                struct btrfs_delayed_ref_head *head_ref,
+                int action),
+
+       TP_ARGS(ref, head_ref, action),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+               __field(        int,  action            ) 
+               __field(        int,  is_data           )
+       ),
+
+       TP_fast_assign(
+               tp_assign(bytenr, ref->bytenr)
+               tp_assign(num_bytes, ref->num_bytes)
+               tp_assign(action, action)
+               tp_assign(is_data, head_ref->is_data)
+       ),
+
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes,
+                 show_ref_action(__entry->action),
+                 __entry->is_data)
+)
+
+#define show_chunk_type(type)                                  \
+       __print_flags(type, "|",                                \
+               { BTRFS_BLOCK_GROUP_DATA,       "DATA"  },      \
+               { BTRFS_BLOCK_GROUP_SYSTEM,     "SYSTEM"},      \
+               { BTRFS_BLOCK_GROUP_METADATA,   "METADATA"},    \
+               { BTRFS_BLOCK_GROUP_RAID0,      "RAID0" },      \
+               { BTRFS_BLOCK_GROUP_RAID1,      "RAID1" },      \
+               { BTRFS_BLOCK_GROUP_DUP,        "DUP"   },      \
+               { BTRFS_BLOCK_GROUP_RAID10,     "RAID10"})
+
+DECLARE_EVENT_CLASS(btrfs__chunk,
+
+       TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+                u64 offset, u64 size),
+
+       TP_ARGS(root, map, offset, size),
+
+       TP_STRUCT__entry(
+               __field(        int,  num_stripes               )
+               __field(        u64,  type                      )
+               __field(        int,  sub_stripes               )
+               __field(        u64,  offset                    )
+               __field(        u64,  size                      )
+               __field(        u64,  root_objectid             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(num_stripes, map->num_stripes)
+               tp_assign(type, map->type)
+               tp_assign(sub_stripes, map->sub_stripes)
+               tp_assign(offset, offset)
+               tp_assign(size, size)
+               tp_assign(root_objectid, root->root_key.objectid)
+       ),
+
+       TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
+                 "num_stripes = %d, sub_stripes = %d, type = %s",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->offset,
+                 (unsigned long long)__entry->size,
+                 __entry->num_stripes, __entry->sub_stripes,
+                 show_chunk_type(__entry->type))
+)
+
+DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_alloc,
+
+       TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+                u64 offset, u64 size),
+
+       TP_ARGS(root, map, offset, size)
+)
+
+DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_free,
+
+       TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+                u64 offset, u64 size),
+
+       TP_ARGS(root, map, offset, size)
+)
+
+TRACE_EVENT(btrfs_cow_block,
+
+       TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
+                struct extent_buffer *cow),
+
+       TP_ARGS(root, buf, cow),
+
+       TP_STRUCT__entry(
+               __field(        u64,  root_objectid             )
+               __field(        u64,  buf_start                 )
+               __field(        int,  refs                      )
+               __field(        u64,  cow_start                 )
+               __field(        int,  buf_level                 )
+               __field(        int,  cow_level                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(root_objectid, root->root_key.objectid)
+               tp_assign(buf_start, buf->start)
+               tp_assign(refs, atomic_read(&buf->refs))
+               tp_assign(cow_start, cow->start)
+               tp_assign(buf_level, btrfs_header_level(buf))
+               tp_assign(cow_level, btrfs_header_level(cow))
+       ),
+
+       TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
+                 "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+                 show_root_type(__entry->root_objectid),
+                 __entry->refs,
+                 (unsigned long long)__entry->buf_start,
+                 __entry->buf_level,
+                 (unsigned long long)__entry->cow_start,
+                 __entry->cow_level)
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+TRACE_EVENT(btrfs_space_reservation,
+
+       TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
+                u64 bytes, int reserve),
+
+       TP_ARGS(fs_info, type, val, bytes, reserve),
+
+       TP_STRUCT__entry(
+               __array(        u8,     fsid,   BTRFS_UUID_SIZE )
+               __string(       type,   type                    )
+               __field(        u64,    val                     )
+               __field(        u64,    bytes                   )
+               __field(        int,    reserve                 )
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(fsid, fs_info->fsid, BTRFS_UUID_SIZE)
+               tp_strcpy(type, type)
+               tp_assign(val, val)
+               tp_assign(bytes, bytes)
+               tp_assign(reserve, reserve)
+       ),
+
+       TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
+                 __entry->val, __entry->reserve ? "reserve" : "release",
+                 __entry->bytes)
+)
+#endif
+
+DECLARE_EVENT_CLASS(btrfs__reserved_extent,
+
+       TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+       TP_ARGS(root, start, len),
+
+       TP_STRUCT__entry(
+               __field(        u64,  root_objectid             )
+               __field(        u64,  start                     )
+               __field(        u64,  len                       )
+       ),
+
+       TP_fast_assign(
+               tp_assign(root_objectid, root->root_key.objectid)
+               tp_assign(start, start)
+               tp_assign(len, len)
+       ),
+
+       TP_printk("root = %llu(%s), start = %llu, len = %llu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->len)
+)
+
+DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_alloc,
+
+       TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+       TP_ARGS(root, start, len)
+)
+
+DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free,
+
+       TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+       TP_ARGS(root, start, len)
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+TRACE_EVENT(find_free_extent,
+
+       TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+                u64 data),
+
+       TP_ARGS(root, num_bytes, empty_size, data),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    num_bytes               )
+               __field(        u64,    empty_size              )
+               __field(        u64,    data                    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(root_objectid, root->root_key.objectid)
+               tp_assign(num_bytes, num_bytes)
+               tp_assign(empty_size, empty_size)
+               tp_assign(data, data)
+       ),
+
+       TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
+                 "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+                 __entry->num_bytes, __entry->empty_size, __entry->data,
+                 __print_flags((unsigned long)__entry->data, "|",
+                                BTRFS_GROUP_FLAGS))
+)
+
+DECLARE_EVENT_CLASS(btrfs__reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    len                     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(root_objectid, root->root_key.objectid)
+               tp_assign(bg_objectid, block_group->key.objectid)
+               tp_assign(flags, block_group->flags)
+               tp_assign(start, start)
+               tp_assign(len, len)
+       ),
+
+       TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
+                 "start = %Lu, len = %Lu",
+                 show_root_type(__entry->root_objectid), __entry->bg_objectid,
+                 __entry->flags, __print_flags((unsigned long)__entry->flags,
+                                               "|", BTRFS_GROUP_FLAGS),
+                 __entry->start, __entry->len)
+)
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+)
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+)
+
+TRACE_EVENT(btrfs_find_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
+                u64 bytes, u64 empty_size, u64 min_bytes),
+
+       TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    bytes                   )
+               __field(        u64,    empty_size              )
+               __field(        u64,    min_bytes               )
+       ),
+
+       TP_fast_assign(
+               tp_assign(bg_objectid, block_group->key.objectid)
+               tp_assign(flags, block_group->flags)
+               tp_assign(start, start)
+               tp_assign(bytes, bytes)
+               tp_assign(empty_size, empty_size)
+               tp_assign(min_bytes, min_bytes)
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
+                 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->bytes, __entry->empty_size,  __entry->min_bytes)
+)
+
+TRACE_EVENT(btrfs_failed_cluster_setup,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group),
+
+       TP_ARGS(block_group),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(bg_objectid, block_group->key.objectid)
+       ),
+
+       TP_printk("block_group = %Lu", __entry->bg_objectid)
+)
+
+TRACE_EVENT(btrfs_setup_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group,
+                struct btrfs_free_cluster *cluster, u64 size, int bitmap),
+
+       TP_ARGS(block_group, cluster, size, bitmap),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    max_size                )
+               __field(        u64,    size                    )
+               __field(        int,    bitmap                  )
+       ),
+
+       TP_fast_assign(
+               tp_assign(bg_objectid, block_group->key.objectid)
+               tp_assign(flags, block_group->flags)
+               tp_assign(start, cluster->window_start)
+               tp_assign(max_size, cluster->max_size)
+               tp_assign(size, size)
+               tp_assign(bitmap, bitmap)
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
+                 "size = %Lu, max_size = %Lu, bitmap = %d",
+                 __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->size, __entry->max_size, __entry->bitmap)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+TRACE_EVENT(alloc_extent_state,
+
+       TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
+
+       TP_ARGS(state, mask, IP),
+
+       TP_STRUCT__entry(
+               __field(struct extent_state *, state)
+               __field(gfp_t, mask)
+               __field(unsigned long, ip)
+       ),
+
+       TP_fast_assign(
+               tp_assign(state, state)
+               tp_assign(mask, mask)
+               tp_assign(ip, IP)
+       ),
+
+       TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+                 show_gfp_flags(__entry->mask), (void *)__entry->ip)
+)
+
+TRACE_EVENT(free_extent_state,
+
+       TP_PROTO(struct extent_state *state, unsigned long IP),
+
+       TP_ARGS(state, IP),
+
+       TP_STRUCT__entry(
+               __field(struct extent_state *, state)
+               __field(unsigned long, ip)
+       ),
+
+       TP_fast_assign(
+               tp_assign(state, state)
+               tp_assign(ip, IP)
+       ),
+
+       TP_printk(" state=%p; caller = %pF", __entry->state,
+                 (void *)__entry->ip)
+)
+#endif
+
+#endif /* _TRACE_BTRFS_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/compaction.h b/instrumentation/events/lttng-module/compaction.h
new file mode 100644 (file)
index 0000000..1b237fa
--- /dev/null
@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compaction
+
+#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COMPACTION_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
+
+DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
+
+       TP_PROTO(unsigned long nr_scanned,
+               unsigned long nr_taken),
+
+       TP_ARGS(nr_scanned, nr_taken),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, nr_scanned)
+               __field(unsigned long, nr_taken)
+       ),
+
+       TP_fast_assign(
+               tp_assign(nr_scanned, nr_scanned)
+               tp_assign(nr_taken, nr_taken)
+       ),
+
+       TP_printk("nr_scanned=%lu nr_taken=%lu",
+               __entry->nr_scanned,
+               __entry->nr_taken)
+)
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
+
+       TP_PROTO(unsigned long nr_scanned,
+               unsigned long nr_taken),
+
+       TP_ARGS(nr_scanned, nr_taken)
+)
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+       TP_PROTO(unsigned long nr_scanned,
+               unsigned long nr_taken),
+
+       TP_ARGS(nr_scanned, nr_taken)
+)
+
+TRACE_EVENT(mm_compaction_migratepages,
+
+       TP_PROTO(unsigned long nr_migrated,
+               unsigned long nr_failed),
+
+       TP_ARGS(nr_migrated, nr_failed),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, nr_migrated)
+               __field(unsigned long, nr_failed)
+       ),
+
+       TP_fast_assign(
+               tp_assign(nr_migrated, nr_migrated)
+               tp_assign(nr_failed, nr_failed)
+       ),
+
+       TP_printk("nr_migrated=%lu nr_failed=%lu",
+               __entry->nr_migrated,
+               __entry->nr_failed)
+)
+
+
+#endif /* _TRACE_COMPACTION_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/ext4.h b/instrumentation/events/lttng-module/ext4.h
new file mode 100644 (file)
index 0000000..661a5a9
--- /dev/null
@@ -0,0 +1,2398 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ext4
+
+#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXT4_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+
+#ifndef _TRACE_EXT4_DEF_
+#define _TRACE_EXT4_DEF_
+struct ext4_allocation_context;
+struct ext4_allocation_request;
+struct ext4_prealloc_space;
+struct ext4_inode_info;
+struct mpage_da_data;
+struct ext4_map_blocks;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+struct ext4_extent;
+#endif
+#endif
+
+#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+#define TP_MODE_T      __u16
+#else
+#define TP_MODE_T      umode_t
+#endif
+
+TRACE_EVENT(ext4_free_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        uid_t,  uid                     )
+               __field(        gid_t,  gid                     )
+               __field(        __u64, blocks                   )
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
+               tp_assign(uid, i_uid_read(inode))
+               tp_assign(gid, i_gid_read(inode))
+#else
+               tp_assign(uid, inode->i_uid)
+               tp_assign(gid, inode->i_gid)
+#endif
+               tp_assign(blocks, inode->i_blocks)
+               tp_assign(mode, inode->i_mode)
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->mode,
+                 __entry->uid, __entry->gid, __entry->blocks)
+)
+
+TRACE_EVENT(ext4_request_inode,
+       TP_PROTO(struct inode *dir, int mode),
+
+       TP_ARGS(dir, mode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  dir                     )
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, dir->i_sb->s_dev)
+               tp_assign(dir, dir->i_ino)
+               tp_assign(mode, mode)
+       ),
+
+       TP_printk("dev %d,%d dir %lu mode 0%o",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->dir, __entry->mode)
+)
+
+TRACE_EVENT(ext4_allocate_inode,
+       TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+       TP_ARGS(inode, dir, mode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        ino_t,  dir                     )
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(dir, dir->i_ino)
+               tp_assign(mode, mode)
+       ),
+
+       TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->dir, __entry->mode)
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+TRACE_EVENT(ext4_evict_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    nlink                   )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(nlink, inode->i_nlink)
+       ),
+
+       TP_printk("dev %d,%d ino %lu nlink %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->nlink)
+)
+
+TRACE_EVENT(ext4_drop_inode,
+       TP_PROTO(struct inode *inode, int drop),
+
+       TP_ARGS(inode, drop),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    drop                    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(drop, drop)
+       ),
+
+       TP_printk("dev %d,%d ino %lu drop %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->drop)
+)
+
+TRACE_EVENT(ext4_mark_inode_dirty,
+       TP_PROTO(struct inode *inode, unsigned long IP),
+
+       TP_ARGS(inode, IP),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(unsigned long,  ip                      )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(ip, IP)
+       ),
+
+       TP_printk("dev %d,%d ino %lu caller %pF",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, (void *)__entry->ip)
+)
+
+TRACE_EVENT(ext4_begin_ordered_truncate,
+       TP_PROTO(struct inode *inode, loff_t new_size),
+
+       TP_ARGS(inode, new_size),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, new_size                )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(new_size, new_size)
+       ),
+
+       TP_printk("dev %d,%d ino %lu new_size %lld",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->new_size)
+)
+#endif
+
+DECLARE_EVENT_CLASS(ext4__write_begin,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned int, len               )
+               __field(        unsigned int, flags             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pos, pos)
+               tp_assign(len, len)
+               tp_assign(flags, flags)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len, __entry->flags)
+)
+
+DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags)
+)
+
+DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags)
+)
+
+DECLARE_EVENT_CLASS(ext4__write_end,
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                       unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned int, len               )
+               __field(        unsigned int, copied            )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pos, pos)
+               tp_assign(len, len)
+               tp_assign(copied, copied)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len, __entry->copied)
+)
+
+DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+)
+
+DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+)
+
+DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+)
+
+DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
+TRACE_EVENT(ext4_writepage,
+       TP_PROTO(struct inode *inode, struct page *page),
+
+       TP_ARGS(inode, page),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t, index                  )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(index, page->index)
+       ),
+
+       TP_printk("dev %d,%d ino %lu page_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->index)
+)
+#endif
+
+TRACE_EVENT(ext4_da_writepages,
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+       TP_ARGS(inode, wbc),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        long,   nr_to_write             )
+               __field(        long,   pages_skipped           )
+               __field(        loff_t, range_start             )
+               __field(        loff_t, range_end               )
+               __field(       pgoff_t, writeback_index         )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               __field(        int,    sync_mode               )
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
+               __field(        char,   nonblocking             )
+#endif
+               __field(        char,   for_kupdate             )
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+               __field(        char,   for_reclaim             )
+#endif
+               __field(        char,   range_cyclic            )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(nr_to_write, wbc->nr_to_write)
+               tp_assign(pages_skipped, wbc->pages_skipped)
+               tp_assign(range_start, wbc->range_start)
+               tp_assign(range_end, wbc->range_end)
+               tp_assign(writeback_index, inode->i_mapping->writeback_index)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               tp_assign(sync_mode, wbc->sync_mode)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
+               tp_assign(nonblocking, wbc->nonblocking)
+#endif
+               tp_assign(for_kupdate, wbc->for_kupdate)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+               tp_assign(for_reclaim, wbc->for_reclaim)
+#endif
+               tp_assign(range_cyclic, wbc->range_cyclic)
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+       TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+                 "range_start %lld range_end %lld sync_mode %d "
+                 "for_kupdate %d range_cyclic %d writeback_index %lu",
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+                 "range_start %llu range_end %llu "
+                 "for_kupdate %d for_reclaim %d "
+                 "range_cyclic %d writeback_index %lu",
+#else
+       TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+                 "range_start %llu range_end %llu "
+                 "nonblocking %d for_kupdate %d for_reclaim %d "
+                 "range_cyclic %d writeback_index %lu",
+#endif
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->nr_to_write,
+                 __entry->pages_skipped, __entry->range_start,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+                 __entry->range_end, __entry->sync_mode,
+                 __entry->for_kupdate, __entry->range_cyclic,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+                 __entry->range_end,
+                 __entry->for_kupdate, __entry->for_reclaim,
+                 __entry->range_cyclic,
+#else
+                 __entry->range_end, __entry->nonblocking,
+                 __entry->for_kupdate, __entry->for_reclaim,
+                 __entry->range_cyclic,
+#endif
+                 (unsigned long) __entry->writeback_index)
+)
+
+TRACE_EVENT(ext4_da_write_pages,
+       TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+
+       TP_ARGS(inode, mpd),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  b_blocknr               )
+               __field(        __u32,  b_size                  )
+               __field(        __u32,  b_state                 )
+               __field(        unsigned long,  first_page      )
+               __field(        int,    io_done                 )
+               __field(        int,    pages_written           )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               __field(        int,    sync_mode               )
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(b_blocknr, mpd->b_blocknr)
+               tp_assign(b_size, mpd->b_size)
+               tp_assign(b_state, mpd->b_state)
+               tp_assign(first_page, mpd->first_page)
+               tp_assign(io_done, mpd->io_done)
+               tp_assign(pages_written, mpd->pages_written)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               tp_assign(sync_mode, mpd->wbc->sync_mode)
+#endif
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+       TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
+                 "first_page %lu io_done %d pages_written %d sync_mode %d",
+#else
+       TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
+                 "first_page %lu io_done %d pages_written %d",
+#endif
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->b_blocknr, __entry->b_size,
+                 __entry->b_state, __entry->first_page,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+                 __entry->io_done, __entry->pages_written,
+                 __entry->sync_mode
+#else
+                 __entry->io_done, __entry->pages_written
+#endif
+                  )
+)
+
+TRACE_EVENT(ext4_da_writepages_result,
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc,
+                       int ret, int pages_written),
+
+       TP_ARGS(inode, wbc, ret, pages_written),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    ret                     )
+               __field(        int,    pages_written           )
+               __field(        long,   pages_skipped           )
+               __field(       pgoff_t, writeback_index         )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               __field(        int,    sync_mode               )
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
+               __field(        char,   encountered_congestion  )
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               __field(        char,   more_io                 )
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+               __field(        char,   no_nrwrite_index_update )
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(ret, ret)
+               tp_assign(pages_written, pages_written)
+               tp_assign(pages_skipped, wbc->pages_skipped)
+               tp_assign(writeback_index, inode->i_mapping->writeback_index)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+               tp_assign(sync_mode, wbc->sync_mode)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
+               tp_assign(encountered_congestion, wbc->encountered_congestion)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               tp_assign(more_io, wbc->more_io)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+               tp_assign(no_nrwrite_index_update, wbc->no_nrwrite_index_update)
+#endif
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+                 "sync_mode %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->ret,
+                 __entry->pages_written, __entry->pages_skipped,
+                 __entry->sync_mode,
+                 (unsigned long) __entry->writeback_index)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+                 " more_io %d sync_mode %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->ret,
+                 __entry->pages_written, __entry->pages_skipped,
+                 __entry->more_io, __entry->sync_mode,
+                 (unsigned long) __entry->writeback_index)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+                 " more_io %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->ret,
+                 __entry->pages_written, __entry->pages_skipped,
+                 __entry->more_io,
+                 (unsigned long) __entry->writeback_index)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+                 " more_io %d no_nrwrite_index_update %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->ret,
+                 __entry->pages_written, __entry->pages_skipped,
+                 __entry->more_io, __entry->no_nrwrite_index_update,
+                 (unsigned long) __entry->writeback_index)
+#else
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+                 " congestion %d"
+                 " more_io %d no_nrwrite_index_update %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->ret,
+                 __entry->pages_written, __entry->pages_skipped,
+                 __entry->encountered_congestion,
+                 __entry->more_io, __entry->no_nrwrite_index_update,
+                 (unsigned long) __entry->writeback_index)
+#endif
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+/*
+ * ext4__page_op - event class shared by the ext4 per-page events
+ * (writepage/readpage/releasepage below).  Records the device and inode
+ * of the page's host mapping plus the page index.
+ */
+DECLARE_EVENT_CLASS(ext4__page_op,
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t, index                  )
+
+       ),
+
+       TP_fast_assign(
+               /* dev/ino come from the owning inode via page->mapping */
+               tp_assign(dev, page->mapping->host->i_sb->s_dev)
+               tp_assign(ino, page->mapping->host->i_ino)
+               tp_assign(index, page->index)
+       ),
+
+       TP_printk("dev %d,%d ino %lu page_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->index)
+)
+
+/* ext4_writepage only became a class-based event in mainline 3.0 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
+DEFINE_EVENT(ext4__page_op, ext4_writepage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
+)
+#endif
+
+/* Page handed to ext4's readpage path */
+DEFINE_EVENT(ext4__page_op, ext4_readpage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
+)
+
+/* Page handed to ext4's releasepage path */
+DEFINE_EVENT(ext4__page_op, ext4_releasepage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
+)
+
+/*
+ * ext4_invalidatepage - page invalidation; like the ext4__page_op class
+ * but additionally records the byte offset within the page at which
+ * invalidation starts.
+ */
+TRACE_EVENT(ext4_invalidatepage,
+       TP_PROTO(struct page *page, unsigned long offset),
+
+       TP_ARGS(page, offset),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t, index                  )
+               __field(        unsigned long, offset           )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, page->mapping->host->i_sb->s_dev)
+               tp_assign(ino, page->mapping->host->i_ino)
+               tp_assign(index, page->index)
+               tp_assign(offset, offset)
+       ),
+
+       TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->index, __entry->offset)
+)
+#endif
+
+/*
+ * ext4_discard_blocks - a range of blocks (start blk + count) is being
+ * discarded on the given superblock's device.
+ */
+TRACE_EVENT(ext4_discard_blocks,
+       TP_PROTO(struct super_block *sb, unsigned long long blk,
+                       unsigned long long count),
+
+       TP_ARGS(sb, blk, count),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u64,  blk                     )
+               __field(        __u64,  count                   )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, sb->s_dev)
+               tp_assign(blk, blk)
+               tp_assign(count, count)
+       ),
+
+       TP_printk("dev %d,%d blk %llu count %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->blk, __entry->count)
+)
+
+/*
+ * ext4__mb_new_pa - event class for creation of a new mballoc
+ * preallocation space: records the physical/logical start and length
+ * of the new pa, keyed by the allocation context's device and inode.
+ */
+DECLARE_EVENT_CLASS(ext4__mb_new_pa,
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  pa_pstart               )
+               __field(        __u64,  pa_lstart               )
+               __field(        __u32,  pa_len                  )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, ac->ac_sb->s_dev)
+               tp_assign(ino, ac->ac_inode->i_ino)
+               tp_assign(pa_pstart, pa->pa_pstart)
+               tp_assign(pa_lstart, pa->pa_lstart)
+               tp_assign(pa_len, pa->pa_len)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
+)
+
+/* New inode (per-file) preallocation space created */
+DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
+
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa)
+)
+
+/* New group (locality-group) preallocation space created */
+DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
+
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa)
+)
+
+/*
+ * ext4_mb_release_inode_pa - an inode preallocation space is released.
+ *
+ * The mainline prototype changed several times, hence the nested
+ * version conditionals:
+ *   >= 3.0            : (pa, block, count)           - dev/ino via pa_inode
+ *   2.6.37 .. < 3.0   : (sb, inode, pa, block, count)
+ *   2.6.36            : (sb, ac, pa, block, count)
+ *   < 2.6.36          : (ac, pa, block, count)
+ */
+TRACE_EVENT(ext4_mb_release_inode_pa,
+       TP_PROTO(
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+                struct super_block *sb,
+                struct inode *inode,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+                struct super_block *sb,
+                struct ext4_allocation_context *ac,
+#else
+                struct ext4_allocation_context *ac,
+#endif
+#endif
+                struct ext4_prealloc_space *pa,
+                unsigned long long block, unsigned int count),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
+       TP_ARGS(pa, block, count),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_ARGS(sb, inode, pa, block, count),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+       TP_ARGS(sb, ac, pa, block, count),
+#else
+       TP_ARGS(ac, pa, block, count),
+#endif
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        __u32,  count                   )
+
+       ),
+
+       TP_fast_assign(
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
+               /* >= 3.0: only pa is available; derive dev/ino from it */
+               tp_assign(dev, pa->pa_inode->i_sb->s_dev)
+               tp_assign(ino, pa->pa_inode->i_ino)
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+               tp_assign(dev, sb->s_dev)
+#else
+               tp_assign(dev, ac->ac_sb->s_dev)
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+               tp_assign(ino, inode->i_ino)
+#else
+               /* ac or ac_inode may be NULL on old kernels; record 0 then */
+               tp_assign(ino, (ac && ac->ac_inode) ? ac->ac_inode->i_ino : 0)
+#endif
+#endif
+               tp_assign(block, block)
+               tp_assign(count, count)
+       ),
+
+       TP_printk("dev %d,%d ino %lu block %llu count %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->block, __entry->count)
+)
+
+/*
+ * ext4_mb_release_group_pa - a group preallocation space is released.
+ *
+ * Prototype by kernel version:
+ *   3.0 .. < 3.3      : (pa)            - dev via pa->pa_inode
+ *   2.6.37 .. < 3.0   : (sb, pa)
+ *   2.6.36            : (sb, ac, pa)
+ *   < 2.6.36          : (ac, pa)
+ * The ino field only exists before 2.6.37.
+ */
+TRACE_EVENT(ext4_mb_release_group_pa,
+
+#if (LTTNG_KERNEL_RANGE(3,0,0, 3,3,0))
+       TP_PROTO(struct ext4_prealloc_space *pa),
+
+       TP_ARGS(pa),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),
+
+       TP_ARGS(sb, pa),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+       TP_PROTO(struct super_block *sb,
+                struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(sb, ac, pa),
+#else
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa),
+#endif
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
+               __field(        ino_t,  ino                     )
+#endif
+               __field(        __u64,  pa_pstart               )
+               __field(        __u32,  pa_len                  )
+
+       ),
+
+       TP_fast_assign(
+#if (LTTNG_KERNEL_RANGE(3,0,0, 3,3,0))
+               tp_assign(dev, pa->pa_inode->i_sb->s_dev)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+               tp_assign(dev, sb->s_dev)
+#else
+               tp_assign(dev, ac->ac_sb->s_dev)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
+               /* recorded but not printed; 0 when context/inode missing */
+               tp_assign(ino, (ac && ac->ac_inode) ? ac->ac_inode->i_ino : 0)
+#endif
+               tp_assign(pa_pstart, pa->pa_pstart)
+               tp_assign(pa_len, pa->pa_len)
+       ),
+
+       TP_printk("dev %d,%d pstart %llu len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->pa_pstart, __entry->pa_len)
+)
+
+/*
+ * ext4_discard_preallocations - the inode's preallocations are being
+ * discarded; records just the device and inode number.
+ */
+TRACE_EVENT(ext4_discard_preallocations,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+       ),
+
+       TP_printk("dev %d,%d ino %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino)
+)
+
+/*
+ * ext4_mb_discard_preallocations - mballoc discards preallocations on
+ * a filesystem; `needed` is the block count passed in by the caller.
+ */
+TRACE_EVENT(ext4_mb_discard_preallocations,
+       TP_PROTO(struct super_block *sb, int needed),
+
+       TP_ARGS(sb, needed),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        int,    needed                  )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, sb->s_dev)
+               tp_assign(needed, needed)
+       ),
+
+       TP_printk("dev %d,%d needed %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->needed)
+)
+
+/*
+ * ext4_request_blocks - a block allocation request is issued; dumps the
+ * full ext4_allocation_request (length, logical goal, left/right
+ * logical and physical neighbours, flags).
+ */
+TRACE_EVENT(ext4_request_blocks,
+       TP_PROTO(struct ext4_allocation_request *ar),
+
+       TP_ARGS(ar),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        unsigned int, len               )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
+               __field(        __u64,  goal                    )
+               __field(        __u64,  pleft                   )
+               __field(        __u64,  pright                  )
+               __field(        unsigned int, flags             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, ar->inode->i_sb->s_dev)
+               tp_assign(ino, ar->inode->i_ino)
+               tp_assign(len, ar->len)
+               tp_assign(logical, ar->logical)
+               tp_assign(goal, ar->goal)
+               tp_assign(lleft, ar->lleft)
+               tp_assign(lright, ar->lright)
+               tp_assign(pleft, ar->pleft)
+               tp_assign(pright, ar->pright)
+               tp_assign(flags, ar->flags)
+       ),
+
+       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+                 "lleft %u lright %u pleft %llu pright %llu ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->logical, __entry->goal,
+                 __entry->lleft, __entry->lright, __entry->pleft,
+                 __entry->pright)
+)
+
+/*
+ * ext4_allocate_blocks - blocks were allocated for a request; same
+ * fields as ext4_request_blocks plus the resulting physical block.
+ */
+TRACE_EVENT(ext4_allocate_blocks,
+       TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
+
+       TP_ARGS(ar, block),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        unsigned int, len               )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
+               __field(        __u64,  goal                    )
+               __field(        __u64,  pleft                   )
+               __field(        __u64,  pright                  )
+               __field(        unsigned int, flags             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, ar->inode->i_sb->s_dev)
+               tp_assign(ino, ar->inode->i_ino)
+               tp_assign(block, block)
+               tp_assign(len, ar->len)
+               tp_assign(logical, ar->logical)
+               tp_assign(goal, ar->goal)
+               tp_assign(lleft, ar->lleft)
+               tp_assign(lright, ar->lright)
+               tp_assign(pleft, ar->pleft)
+               tp_assign(pright, ar->pright)
+               tp_assign(flags, ar->flags)
+       ),
+
+       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+                 "goal %llu lleft %u lright %u pleft %llu pright %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->block, __entry->logical,
+                 __entry->goal,  __entry->lleft, __entry->lright,
+                 __entry->pleft, __entry->pright)
+)
+
+/*
+ * ext4_free_blocks - a block range is freed.  Mainline renamed the
+ * final parameter from `metadata` to `flags` (and started recording
+ * the inode mode) in 2.6.33; both variants are kept here.
+ */
+TRACE_EVENT(ext4_free_blocks,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+       TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
+                int flags),
+
+       TP_ARGS(inode, block, count, flags),
+#else
+       TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
+                int metadata),
+
+       TP_ARGS(inode, block, count, metadata),
+#endif
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        unsigned long,  count           )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+               __field(        int,    flags                   )
+               __field(        TP_MODE_T, mode                 )
+#else
+               __field(        int,    metadata                )
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(block, block)
+               tp_assign(count, count)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+               tp_assign(flags, flags)
+               tp_assign(mode, inode->i_mode)
+#else
+               tp_assign(metadata, metadata)
+#endif
+       ),
+
+/* printk format and argument list must track the same version split */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+       TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+#else
+       TP_printk("dev %d,%d ino %lu block %llu count %lu metadata %d",
+#endif
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+                 __entry->mode, __entry->block, __entry->count,
+                 __entry->flags)
+#else
+                 __entry->block, __entry->count, __entry->metadata)
+#endif
+)
+
+/*
+ * fsync entry on ext4.  Mainline renamed the event ext4_sync_file ->
+ * ext4_sync_file_enter in 2.6.39, and dropped the explicit dentry
+ * parameter in 2.6.35 (it is reached via file->f_path since then).
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+TRACE_EVENT(ext4_sync_file_enter,
+#else
+TRACE_EVENT(ext4_sync_file,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+       TP_PROTO(struct file *file, int datasync),
+
+       TP_ARGS(file, datasync),
+#else
+       TP_PROTO(struct file *file, struct dentry *dentry, int datasync),
+
+       TP_ARGS(file, dentry, datasync),
+#endif
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        ino_t,  parent                  )
+               __field(        int,    datasync                )
+       ),
+
+       TP_fast_assign(
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+               tp_assign(dev, file->f_path.dentry->d_inode->i_sb->s_dev)
+               tp_assign(ino, file->f_path.dentry->d_inode->i_ino)
+               tp_assign(datasync, datasync)
+               tp_assign(parent, file->f_path.dentry->d_parent->d_inode->i_ino)
+#else
+               tp_assign(dev, dentry->d_inode->i_sb->s_dev)
+               tp_assign(ino, dentry->d_inode->i_ino)
+               tp_assign(datasync, datasync)
+               tp_assign(parent, dentry->d_parent->d_inode->i_ino)
+#endif
+       ),
+
+       TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->parent, __entry->datasync)
+)
+
+/* fsync completion with its return code; event exists since 2.6.39 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+TRACE_EVENT(ext4_sync_file_exit,
+       TP_PROTO(struct inode *inode, int ret),
+
+       TP_ARGS(inode, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d ino %lu ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->ret)
+)
+#endif
+
+/*
+ * ext4_sync_fs - whole-filesystem sync; `wait` is the caller's
+ * synchronous-wait flag.
+ */
+TRACE_EVENT(ext4_sync_fs,
+       TP_PROTO(struct super_block *sb, int wait),
+
+       TP_ARGS(sb, wait),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        int,    wait                    )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, sb->s_dev)
+               tp_assign(wait, wait)
+       ),
+
+       TP_printk("dev %d,%d wait %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->wait)
+)
+
+/*
+ * ext4_alloc_da_blocks - delayed-allocation block counters for an
+ * inode, read from its ext4_inode_info at trace time.
+ */
+TRACE_EVENT(ext4_alloc_da_blocks,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field( unsigned int,  data_blocks     )
+               __field( unsigned int,  meta_blocks     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(data_blocks, EXT4_I(inode)->i_reserved_data_blocks)
+               tp_assign(meta_blocks, EXT4_I(inode)->i_reserved_meta_blocks)
+       ),
+
+       TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->data_blocks, __entry->meta_blocks)
+)
+
+/*
+ * ext4_mballoc_alloc - full mballoc allocation result: the original
+ * request extent (o_ex), the normalized goal extent (g_ex) and the
+ * found extent (f_ex), each as logical/start/group/len, plus scan
+ * statistics (found, groups scanned, buddy order, flags, tail, cr).
+ */
+TRACE_EVENT(ext4_mballoc_alloc,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  orig_logical            )
+               __field(          int,  orig_start              )
+               __field(        __u32,  orig_group              )
+               __field(          int,  orig_len                )
+               __field(        __u32,  goal_logical            )
+               __field(          int,  goal_start              )
+               __field(        __u32,  goal_group              )
+               __field(          int,  goal_len                )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+               __field(        __u16,  found                   )
+               __field(        __u16,  groups                  )
+               __field(        __u16,  buddy                   )
+               __field(        __u16,  flags                   )
+               __field(        __u16,  tail                    )
+               __field(        __u8,   cr                      )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, ac->ac_inode->i_sb->s_dev)
+               tp_assign(ino, ac->ac_inode->i_ino)
+               tp_assign(orig_logical, ac->ac_o_ex.fe_logical)
+               tp_assign(orig_start, ac->ac_o_ex.fe_start)
+               tp_assign(orig_group, ac->ac_o_ex.fe_group)
+               tp_assign(orig_len, ac->ac_o_ex.fe_len)
+               tp_assign(goal_logical, ac->ac_g_ex.fe_logical)
+               tp_assign(goal_start, ac->ac_g_ex.fe_start)
+               tp_assign(goal_group, ac->ac_g_ex.fe_group)
+               tp_assign(goal_len, ac->ac_g_ex.fe_len)
+               tp_assign(result_logical, ac->ac_f_ex.fe_logical)
+               tp_assign(result_start, ac->ac_f_ex.fe_start)
+               tp_assign(result_group, ac->ac_f_ex.fe_group)
+               tp_assign(result_len, ac->ac_f_ex.fe_len)
+               tp_assign(found, ac->ac_found)
+               tp_assign(flags, ac->ac_flags)
+               tp_assign(groups, ac->ac_groups_scanned)
+               tp_assign(buddy, ac->ac_buddy)
+               tp_assign(tail, ac->ac_tail)
+               tp_assign(cr, ac->ac_criteria)
+       ),
+
+       TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+                 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
+                 "tail %u broken %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->orig_group, __entry->orig_start,
+                 __entry->orig_len, __entry->orig_logical,
+                 __entry->goal_group, __entry->goal_start,
+                 __entry->goal_len, __entry->goal_logical,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical,
+                 __entry->found, __entry->groups, __entry->cr,
+                 __entry->flags, __entry->tail,
+                 /* "broken": buddy order expanded to a block count */
+                 __entry->buddy ? 1 << __entry->buddy : 0)
+)
+
+/*
+ * ext4_mballoc_prealloc - allocation satisfied from preallocation:
+ * original request extent (o_ex) vs best-found extent (b_ex).
+ */
+TRACE_EVENT(ext4_mballoc_prealloc,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  orig_logical            )
+               __field(          int,  orig_start              )
+               __field(        __u32,  orig_group              )
+               __field(          int,  orig_len                )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, ac->ac_inode->i_sb->s_dev)
+               tp_assign(ino, ac->ac_inode->i_ino)
+               tp_assign(orig_logical, ac->ac_o_ex.fe_logical)
+               tp_assign(orig_start, ac->ac_o_ex.fe_start)
+               tp_assign(orig_group, ac->ac_o_ex.fe_group)
+               tp_assign(orig_len, ac->ac_o_ex.fe_len)
+               tp_assign(result_logical, ac->ac_b_ex.fe_logical)
+               tp_assign(result_start, ac->ac_b_ex.fe_start)
+               tp_assign(result_group, ac->ac_b_ex.fe_group)
+               tp_assign(result_len, ac->ac_b_ex.fe_len)
+       ),
+
+       TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->orig_group, __entry->orig_start,
+                 __entry->orig_len, __entry->orig_logical,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical)
+)
+
+/*
+ * ext4__mballoc - event class for mballoc discard/free.  Since 2.6.37
+ * mainline passes (sb, inode, group, start, len) explicitly; earlier
+ * kernels pass the allocation context and also record the logical
+ * offset of the best-found extent.
+ */
+DECLARE_EVENT_CLASS(ext4__mballoc,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, inode, group, start, len),
+#else
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+#endif
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
+               __field(        __u32,  result_logical          )
+#endif
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+               /* inode may be NULL (e.g. group-level operations) */
+               tp_assign(dev, sb->s_dev)
+               tp_assign(ino, inode ? inode->i_ino : 0)
+               tp_assign(result_start, start)
+               tp_assign(result_group, group)
+               tp_assign(result_len, len)
+#else
+               tp_assign(dev, ac->ac_sb->s_dev)
+               tp_assign(ino, ac->ac_inode ? ac->ac_inode->i_ino : 0)
+               tp_assign(result_logical, ac->ac_b_ex.fe_logical)
+               tp_assign(result_start, ac->ac_b_ex.fe_start)
+               tp_assign(result_group, ac->ac_b_ex.fe_group)
+               tp_assign(result_len, ac->ac_b_ex.fe_len)
+#endif
+       ),
+
+/* old kernels print an extra @logical suffix on the extent */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
+#else
+       TP_printk("dev %d,%d inode %lu extent %u/%d/%u@%u ",
+#endif
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->result_group, __entry->result_start,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+                 __entry->result_len
+#else
+                 __entry->result_len, __entry->result_logical
+#endif
+       )
+)
+
+/* Extent discarded by mballoc; see ext4__mballoc class for fields */
+DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, inode, group, start, len)
+#else
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac)
+#endif
+)
+
+/* Extent freed by mballoc; see ext4__mballoc class for fields */
+DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, inode, group, start, len)
+#else
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac)
+#endif
+)
+
+/*
+ * ext4_forget - a block is forgotten (journal revoke path); records
+ * whether the caller flagged it as metadata.  Event exists since 2.6.33.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+TRACE_EVENT(ext4_forget,
+       TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
+
+       TP_ARGS(inode, is_metadata, block),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        int,    is_metadata             )
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(block, block)
+               tp_assign(is_metadata, is_metadata)
+               tp_assign(mode, inode->i_mode)
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->is_metadata, __entry->block)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+/*
+ * ext4_da_update_reserve_space - delayed-allocation reservation update
+ * after blocks are actually used: per-inode reserved/allocated counters
+ * plus the number of blocks consumed.  Kernels >= 3.2 also pass and
+ * record the quota_claim flag.
+ */
+TRACE_EVENT(ext4_da_update_reserve_space,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
+
+       TP_ARGS(inode, used_blocks, quota_claim),
+#else
+       TP_PROTO(struct inode *inode, int used_blocks),
+
+       TP_ARGS(inode, used_blocks),
+#endif
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  i_blocks                )
+               __field(        int,    used_blocks             )
+               __field(        int,    reserved_data_blocks    )
+               __field(        int,    reserved_meta_blocks    )
+               __field(        int,    allocated_meta_blocks   )
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               __field(        int,    quota_claim             )
+#endif
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(i_blocks, inode->i_blocks)
+               tp_assign(used_blocks, used_blocks)
+               /* counters read from the inode's ext4_inode_info */
+               tp_assign(reserved_data_blocks,
+                               EXT4_I(inode)->i_reserved_data_blocks)
+               tp_assign(reserved_meta_blocks,
+                               EXT4_I(inode)->i_reserved_meta_blocks)
+               tp_assign(allocated_meta_blocks,
+                               EXT4_I(inode)->i_allocated_meta_blocks)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               tp_assign(quota_claim, quota_claim)
+#endif
+               tp_assign(mode, inode->i_mode)
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d "
+                 "allocated_meta_blocks %d quota_claim %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->used_blocks, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
+                 __entry->quota_claim)
+#else
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d "
+                 "allocated_meta_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->used_blocks, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+#endif
+)
+
+/*
+ * Fired when delayed-allocation space is reserved for an inode: records
+ * md_needed plus the inode's current reserved data/meta block counters.
+ */
+TRACE_EVENT(ext4_da_reserve_space,
+       TP_PROTO(struct inode *inode, int md_needed),
+
+       TP_ARGS(inode, md_needed),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  i_blocks                )
+               __field(        int,    md_needed               )
+               __field(        int,    reserved_data_blocks    )
+               __field(        int,    reserved_meta_blocks    )
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(i_blocks, inode->i_blocks)
+               tp_assign(md_needed, md_needed)
+               tp_assign(reserved_data_blocks,
+                               EXT4_I(inode)->i_reserved_data_blocks)
+               tp_assign(reserved_meta_blocks,
+                               EXT4_I(inode)->i_reserved_meta_blocks)
+               tp_assign(mode, inode->i_mode)
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->md_needed, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks)
+)
+
+/*
+ * Fired when previously reserved delayed-allocation space is released:
+ * records freed_blocks and the reserved/allocated counters after release.
+ */
+TRACE_EVENT(ext4_da_release_space,
+       TP_PROTO(struct inode *inode, int freed_blocks),
+
+       TP_ARGS(inode, freed_blocks),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  i_blocks                )
+               __field(        int,    freed_blocks            )
+               __field(        int,    reserved_data_blocks    )
+               __field(        int,    reserved_meta_blocks    )
+               __field(        int,    allocated_meta_blocks   )
+               __field(        TP_MODE_T, mode                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(i_blocks, inode->i_blocks)
+               tp_assign(freed_blocks, freed_blocks)
+               tp_assign(reserved_data_blocks,
+                               EXT4_I(inode)->i_reserved_data_blocks)
+               tp_assign(reserved_meta_blocks,
+                               EXT4_I(inode)->i_reserved_meta_blocks)
+               tp_assign(allocated_meta_blocks,
+                               EXT4_I(inode)->i_allocated_meta_blocks)
+               tp_assign(mode, inode->i_mode)
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d "
+                 "allocated_meta_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->freed_blocks, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*
+ * Event class for block-group bitmap loads: records only the device and
+ * the group number being loaded.
+ */
+DECLARE_EVENT_CLASS(ext4__bitmap_load,
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u32,  group                   )
+
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, sb->s_dev)
+               tp_assign(group, group)
+       ),
+
+       TP_printk("dev %d,%d group %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->group)
+)
+
+/* mballoc loading a group's block bitmap. */
+DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+)
+
+/* mballoc loading a group's buddy bitmap. */
+DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+/* Reading a group's on-disk block bitmap (ext4__bitmap_load class). */
+DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+)
+
+/* Loading a group's inode bitmap (ext4__bitmap_load class). */
+DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+)
+
+/*
+ * Entry of a direct-I/O request: position, length and read/write flag.
+ */
+TRACE_EVENT(ext4_direct_IO_enter,
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+
+       TP_ARGS(inode, offset, len, rw),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned long,  len             )
+               __field(        int,    rw                      )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pos, offset)
+               tp_assign(len, len)
+               tp_assign(rw, rw)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len, __entry->rw)
+)
+
+/*
+ * Completion of a direct-I/O request: same fields as the enter event plus
+ * the return value (bytes transferred or negative error).
+ */
+TRACE_EVENT(ext4_direct_IO_exit,
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+                int rw, int ret),
+
+       TP_ARGS(inode, offset, len, rw, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned long,  len             )
+               __field(        int,    rw                      )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pos, offset)
+               tp_assign(len, len)
+               tp_assign(rw, rw)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len,
+                 __entry->rw, __entry->ret)
+)
+
+/*
+ * Entry of fallocate(): requested offset, length and mode flags.
+ */
+TRACE_EVENT(ext4_fallocate_enter,
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+       TP_ARGS(inode, offset, len, mode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        loff_t, len                     )
+               __field(        int,    mode                    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pos, offset)
+               tp_assign(len, len)
+               tp_assign(mode, mode)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->pos,
+                 __entry->len, __entry->mode)
+)
+
+/*
+ * Completion of fallocate(): blocks handled (max_blocks) and return code.
+ */
+TRACE_EVENT(ext4_fallocate_exit,
+       TP_PROTO(struct inode *inode, loff_t offset,
+                unsigned int max_blocks, int ret),
+
+       TP_ARGS(inode, offset, max_blocks, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned int,   blocks          )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pos, offset)
+               tp_assign(blocks, max_blocks)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->blocks,
+                 __entry->ret)
+)
+
+/*
+ * Entry of unlink: victim inode, its size, and the parent directory inode.
+ * Note: dev/ino/size are taken from dentry->d_inode, the file being removed.
+ */
+TRACE_EVENT(ext4_unlink_enter,
+       TP_PROTO(struct inode *parent, struct dentry *dentry),
+
+       TP_ARGS(parent, dentry),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        ino_t,  parent                  )
+               __field(        loff_t, size                    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, dentry->d_inode->i_sb->s_dev)
+               tp_assign(ino, dentry->d_inode->i_ino)
+               tp_assign(parent, parent->i_ino)
+               tp_assign(size, dentry->d_inode->i_size)
+       ),
+
+       TP_printk("dev %d,%d ino %lu size %lld parent %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->size,
+                 (unsigned long) __entry->parent)
+)
+
+/*
+ * Completion of unlink: victim inode and the operation's return code.
+ */
+TRACE_EVENT(ext4_unlink_exit,
+       TP_PROTO(struct dentry *dentry, int ret),
+
+       TP_ARGS(dentry, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, dentry->d_inode->i_sb->s_dev)
+               tp_assign(ino, dentry->d_inode->i_ino)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d ino %lu ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->ret)
+)
+
+/*
+ * Event class for truncate enter/exit: device, inode and current i_blocks.
+ */
+DECLARE_EVENT_CLASS(ext4__truncate,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        __u64,          blocks          )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(blocks, inode->i_blocks)
+       ),
+
+       TP_printk("dev %d,%d ino %lu blocks %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->blocks)
+)
+
+/* Truncate operation starts. */
+DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+)
+
+/* Truncate operation finishes; blocks reflects the post-truncate count. */
+DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+/* 'ux' is the uninitialized extent. */
+TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                struct ext4_extent *ux),
+
+       TP_ARGS(inode, map, ux),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    m_lblk  )
+               __field(        unsigned,       m_len   )
+               __field(        ext4_lblk_t,    u_lblk  )
+               __field(        unsigned,       u_len   )
+               __field(        ext4_fsblk_t,   u_pblk  )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(m_lblk, map->m_lblk)
+               tp_assign(m_len, map->m_len)
+               tp_assign(u_lblk, le32_to_cpu(ux->ee_block))
+               tp_assign(u_len, ext4_ext_get_actual_len(ux))
+               tp_assign(u_pblk, ext4_ext_pblock(ux))
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
+                 "u_pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->m_lblk, __entry->m_len,
+                 __entry->u_lblk, __entry->u_len, __entry->u_pblk)
+)
+
+/*
+ * 'ux' is the uninitialized extent.
+ * 'ix' is the initialized extent to which blocks are transferred.
+ */
+/*
+ * Fast-path conversion: blocks are moved from the uninitialized extent
+ * 'ux' into the adjacent initialized extent 'ix' without a split.
+ */
+TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                struct ext4_extent *ux, struct ext4_extent *ix),
+
+       TP_ARGS(inode, map, ux, ix),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    m_lblk  )
+               __field(        unsigned,       m_len   )
+               __field(        ext4_lblk_t,    u_lblk  )
+               __field(        unsigned,       u_len   )
+               __field(        ext4_fsblk_t,   u_pblk  )
+               __field(        ext4_lblk_t,    i_lblk  )
+               __field(        unsigned,       i_len   )
+               __field(        ext4_fsblk_t,   i_pblk  )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(m_lblk, map->m_lblk)
+               tp_assign(m_len, map->m_len)
+               tp_assign(u_lblk, le32_to_cpu(ux->ee_block))
+               tp_assign(u_len, ext4_ext_get_actual_len(ux))
+               tp_assign(u_pblk, ext4_ext_pblock(ux))
+               tp_assign(i_lblk, le32_to_cpu(ix->ee_block))
+               tp_assign(i_len, ext4_ext_get_actual_len(ix))
+               tp_assign(i_pblk, ext4_ext_pblock(ix))
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u "
+                 "u_lblk %u u_len %u u_pblk %llu "
+                 "i_lblk %u i_len %u i_pblk %llu ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->m_lblk, __entry->m_len,
+                 __entry->u_lblk, __entry->u_len, __entry->u_pblk,
+                 __entry->i_lblk, __entry->i_len, __entry->i_pblk)
+)
+#endif
+
+/*
+ * Event class for block-mapping entry points (extent and indirect paths):
+ * logical block, length and mapping flags requested.
+ */
+DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                unsigned int len, unsigned int flags),
+
+       TP_ARGS(inode, lblk, len, flags),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   flags           )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(lblk, lblk)
+               tp_assign(len, len)
+               tp_assign(flags, flags)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->lblk, __entry->len, __entry->flags)
+)
+
+/* Extent-tree mapping path entry. */
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                unsigned len, unsigned flags),
+
+       TP_ARGS(inode, lblk, len, flags)
+)
+
+/* Indirect-block mapping path entry. */
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                unsigned len, unsigned flags),
+
+       TP_ARGS(inode, lblk, len, flags)
+)
+
+/*
+ * Event class for block-mapping completion: resulting physical block and
+ * return code in addition to the requested lblk/len.
+ */
+DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                ext4_fsblk_t pblk, unsigned int len, int ret),
+
+       TP_ARGS(inode, lblk, pblk, len, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        unsigned int,   len             )
+               __field(        int,            ret             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pblk, pblk)
+               tp_assign(lblk, lblk)
+               tp_assign(len, len)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->lblk, __entry->pblk,
+                 __entry->len, __entry->ret)
+)
+
+/* Extent-tree mapping path exit. */
+DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                ext4_fsblk_t pblk, unsigned len, int ret),
+
+       TP_ARGS(inode, lblk, pblk, len, ret)
+)
+
+/* Indirect-block mapping path exit. */
+DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                ext4_fsblk_t pblk, unsigned len, int ret),
+
+       TP_ARGS(inode, lblk, pblk, len, ret)
+)
+
+/*
+ * Fired when an extent-tree block is read from disk during a lookup.
+ */
+TRACE_EVENT(ext4_ext_load_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk),
+
+       TP_ARGS(inode, lblk, pblk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        ext4_lblk_t,    lblk            )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pblk, pblk)
+               tp_assign(lblk, lblk)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->lblk, __entry->pblk)
+)
+
+/*
+ * Fired when an inode is loaded: records only device and inode number.
+ *
+ * Fix: the ino argument is cast to unsigned long, so the conversion
+ * specifier must be %lu, not %ld (signed/unsigned mismatch).
+ */
+TRACE_EVENT(ext4_load_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev             )
+               __field(        ino_t,  ino             )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+       ),
+
+       TP_printk("dev %d,%d ino %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+/*
+ * Fired when a journal handle is started: requested block credits and the
+ * caller's instruction pointer (printed with %pF for symbol resolution).
+ */
+TRACE_EVENT(ext4_journal_start,
+       TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
+
+       TP_ARGS(sb, nblocks, IP),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(unsigned long,  ip                      )
+               __field(          int,  nblocks                 )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, sb->s_dev)
+               tp_assign(ip, IP)
+               tp_assign(nblocks, nblocks)
+       ),
+
+       TP_printk("dev %d,%d nblocks %d caller %pF",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->nblocks, (void *)__entry->ip)
+)
+
+/*
+ * Event class for FITRIM operations. Unlike the other classes in this
+ * file it stores major/minor as separate int fields rather than a dev_t.
+ */
+DECLARE_EVENT_CLASS(ext4__trim,
+       TP_PROTO(struct super_block *sb,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        int,    dev_major               )
+               __field(        int,    dev_minor               )
+               __field(        __u32,  group                   )
+               __field(        int,    start                   )
+               __field(        int,    len                     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev_major, MAJOR(sb->s_dev))
+               tp_assign(dev_minor, MINOR(sb->s_dev))
+               tp_assign(group, group)
+               tp_assign(start, start)
+               tp_assign(len, len)
+       ),
+
+       TP_printk("dev %d,%d group %u, start %d, len %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->group, __entry->start, __entry->len)
+)
+
+/* A single extent within a group is discarded. */
+DEFINE_EVENT(ext4__trim, ext4_trim_extent,
+
+       TP_PROTO(struct super_block *sb,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, group, start, len)
+)
+
+/* All free space in the given group range has been trimmed. */
+DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
+
+       TP_PROTO(struct super_block *sb,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, group, start, len)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+/*
+ * Fired while handling a write into an uninitialized extent: the mapping
+ * request, blocks already allocated, and the newly allocated block.
+ *
+ * Fix: the two adjacent string literals concatenated as
+ * "...flags %d" "allocated %d..." with no separator, producing
+ * "flags %dallocated"; a space is added between the fields.
+ */
+TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                unsigned int allocated, ext4_fsblk_t newblock),
+
+       TP_ARGS(inode, map, allocated, newblock),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        int,            flags           )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   allocated       )
+               __field(        ext4_fsblk_t,   newblk          )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(flags, map->m_flags)
+               tp_assign(lblk, map->m_lblk)
+               tp_assign(pblk, map->m_pblk)
+               tp_assign(len, map->m_len)
+               tp_assign(allocated, allocated)
+               tp_assign(newblk, newblock)
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d "
+                 "allocated %d newblock %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->len, __entry->flags,
+                 (unsigned int) __entry->allocated,
+                 (unsigned long long) __entry->newblk)
+)
+
+/*
+ * Result of the implied-cluster allocation check (bigalloc): the mapping
+ * state and whether an implied cluster was found (ret).
+ */
+TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
+       TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
+
+       TP_ARGS(sb, map, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        unsigned int,   flags   )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        ext4_fsblk_t,   pblk    )
+               __field(        unsigned int,   len     )
+               __field(        int,            ret     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, sb->s_dev)
+               tp_assign(flags, map->m_flags)
+               tp_assign(lblk, map->m_lblk)
+               tp_assign(pblk, map->m_pblk)
+               tp_assign(len, map->m_len)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->len, __entry->flags, __entry->ret)
+)
+
+/*
+ * An extent is inserted into the inode's single-entry extent cache.
+ */
+TRACE_EVENT(ext4_ext_put_in_cache,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
+                ext4_fsblk_t start),
+
+       TP_ARGS(inode, lblk, len, start),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned int,   len     )
+               __field(        ext4_fsblk_t,   start   )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(lblk, lblk)
+               tp_assign(len, len)
+               tp_assign(start, start)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->len,
+                 (unsigned long long) __entry->start)
+)
+
+/*
+ * Result of an extent-cache lookup: ret indicates hit/miss status.
+ */
+TRACE_EVENT(ext4_ext_in_cache,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
+
+       TP_ARGS(inode, lblk, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        int,            ret     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(lblk, lblk)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->ret)
+
+)
+
+/*
+ * Search for delayed-allocation blocks in [from, to] (or reversed):
+ * whether one was found and, if so, at which logical block.
+ */
+TRACE_EVENT(ext4_find_delalloc_range,
+       TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
+               int reverse, int found, ext4_lblk_t found_blk),
+
+       TP_ARGS(inode, from, to, reverse, found, found_blk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    from            )
+               __field(        ext4_lblk_t,    to              )
+               __field(        int,            reverse         )
+               __field(        int,            found           )
+               __field(        ext4_lblk_t,    found_blk       )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(from, from)
+               tp_assign(to, to)
+               tp_assign(reverse, reverse)
+               tp_assign(found, found)
+               tp_assign(found_blk, found_blk)
+       ),
+
+       TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
+                 "(blk = %u)",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->from, (unsigned) __entry->to,
+                 __entry->reverse, __entry->found,
+                 (unsigned) __entry->found_blk)
+)
+
+/*
+ * A reserved cluster is allocated for a delayed-allocation range.
+ */
+TRACE_EVENT(ext4_get_reserved_cluster_alloc,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
+
+       TP_ARGS(inode, lblk, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned int,   len     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(lblk, lblk)
+               tp_assign(len, len)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->len)
+)
+
+/*
+ * Dumps a single extent (logical block, physical block, length) during
+ * extent-tree traversal.
+ */
+TRACE_EVENT(ext4_ext_show_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+                unsigned short len),
+
+       TP_ARGS(inode, lblk, pblk, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_fsblk_t,   pblk    )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned short, len     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pblk, pblk)
+               tp_assign(lblk, lblk)
+               tp_assign(len, len)
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 (unsigned long long) __entry->pblk,
+                 (unsigned short) __entry->len)
+)
+
+/*
+ * Blocks [from, to] are removed from extent 'ex' during truncate/punch.
+ *
+ * Fixes: (1) ee_lblk now uses le32_to_cpu() — ee_block is an on-disk
+ * little-endian field being read into CPU order, as ext4_ext_rm_leaf
+ * already does; cpu_to_le32() was wrong on big-endian (a no-op on LE).
+ * (2) A space is added between the concatenated format strings, which
+ * previously printed "...%u]from %u".
+ */
+TRACE_EVENT(ext4_remove_blocks,
+           TP_PROTO(struct inode *inode, struct ext4_extent *ex,
+               ext4_lblk_t from, ext4_fsblk_t to,
+               ext4_fsblk_t partial_cluster),
+
+       TP_ARGS(inode, ex, from, to, partial_cluster),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    from    )
+               __field(        ext4_lblk_t,    to      )
+               __field(        ext4_fsblk_t,   partial )
+               __field(        ext4_fsblk_t,   ee_pblk )
+               __field(        ext4_lblk_t,    ee_lblk )
+               __field(        unsigned short, ee_len  )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(from, from)
+               tp_assign(to, to)
+               tp_assign(partial, partial_cluster)
+               tp_assign(ee_pblk, ext4_ext_pblock(ex))
+               tp_assign(ee_lblk, le32_to_cpu(ex->ee_block))
+               tp_assign(ee_len, ext4_ext_get_actual_len(ex))
+       ),
+
+       TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u] "
+                 "from %u to %u partial_cluster %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->ee_lblk,
+                 (unsigned long long) __entry->ee_pblk,
+                 (unsigned short) __entry->ee_len,
+                 (unsigned) __entry->from,
+                 (unsigned) __entry->to,
+                 (unsigned) __entry->partial)
+)
+
+/*
+ * Traces removal of extents from a leaf node of the inode's extent tree:
+ * records the starting logical block, the last extent visited
+ * (logical block, physical block, length) and the pending partial cluster.
+ */
+TRACE_EVENT(ext4_ext_rm_leaf,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start,
+                struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
+
+       TP_ARGS(inode, start, ex, partial_cluster),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_fsblk_t,   partial )
+               __field(        ext4_lblk_t,    start   )
+               __field(        ext4_lblk_t,    ee_lblk )
+               __field(        ext4_fsblk_t,   ee_pblk )
+               __field(        short,          ee_len  )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(partial, partial_cluster)
+               tp_assign(start, start)
+               tp_assign(ee_lblk, le32_to_cpu(ex->ee_block))
+               tp_assign(ee_pblk, ext4_ext_pblock(ex))
+               tp_assign(ee_len, ext4_ext_get_actual_len(ex))
+       ),
+
+       /* Trailing space added: the concatenated literals previously
+        * printed "...%u]partial_cluster ...". */
+       TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u] "
+                 "partial_cluster %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 (unsigned) __entry->ee_lblk,
+                 (unsigned long long) __entry->ee_pblk,
+                 (unsigned short) __entry->ee_len,
+                 (unsigned) __entry->partial)
+)
+
+/*
+ * Traces removal of one index (interior) node from the inode's extent
+ * tree: records dev/ino and the physical block of the index block.
+ */
+TRACE_EVENT(ext4_ext_rm_idx,
+       TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
+
+       TP_ARGS(inode, pblk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_fsblk_t,   pblk    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(pblk, pblk)
+       ),
+
+       TP_printk("dev %d,%d ino %lu index_pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long long) __entry->pblk)
+)
+
+/*
+ * Traces the start of extent-tree space removal for an inode, beginning
+ * at logical block "start", with the current tree depth.
+ */
+TRACE_EVENT(ext4_ext_remove_space,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+
+       TP_ARGS(inode, start, depth),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    start   )
+               __field(        int,            depth   )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(start, start)
+               tp_assign(depth, depth)
+       ),
+
+       TP_printk("dev %d,%d ino %lu since %u depth %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 __entry->depth)
+)
+
+/*
+ * Completion counterpart of ext4_ext_remove_space: additionally records
+ * the leftover partial cluster and the number of extent-header entries
+ * still present (eh_entries, printed as "remaining_entries").
+ */
+TRACE_EVENT(ext4_ext_remove_space_done,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
+               ext4_lblk_t partial, unsigned short eh_entries),
+
+       TP_ARGS(inode, start, depth, partial, eh_entries),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    start           )
+               __field(        int,            depth           )
+               __field(        ext4_lblk_t,    partial         )
+               __field(        unsigned short, eh_entries      )
+       ),
+
+       TP_fast_assign(
+               tp_assign(dev, inode->i_sb->s_dev)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(start, start)
+               tp_assign(depth, depth)
+               tp_assign(partial, partial)
+               tp_assign(eh_entries, eh_entries)
+       ),
+
+       TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+                 "remaining_entries %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 __entry->depth,
+                 (unsigned) __entry->partial,
+                 (unsigned short) __entry->eh_entries)
+)
+#endif
+
+#endif /* _TRACE_EXT4_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/printk.h b/instrumentation/events/lttng-module/printk.h
new file mode 100644 (file)
index 0000000..4c744f9
--- /dev/null
@@ -0,0 +1,64 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+
+#define MSG_TRACE_MAX_LEN      2048
+
+/*
+ * Trace text written to the console log buffer.  TP_CONDITION drops
+ * empty ranges (start == end).  At most MSG_TRACE_MAX_LEN bytes of the
+ * message are recorded as a NUL-terminated dynamic text array; longer
+ * messages are truncated from the front (start is advanced).
+ */
+TRACE_EVENT_CONDITION(console,
+       TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+                unsigned log_buf_len),
+
+       TP_ARGS(log_buf, start, end, log_buf_len),
+
+       TP_CONDITION(start != end),
+
+       TP_STRUCT__entry(
+               __dynamic_array_text(char, msg,
+                       min_t(unsigned, end - start, MSG_TRACE_MAX_LEN) + 1)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy_dyn(msg,
+                       ({
+                               /* Staging copy; the statement expression
+                                * evaluates to lmsg for tp_memcpy_dyn. */
+                               char lmsg[MSG_TRACE_MAX_LEN + 1];
+
+                               if ((end - start) > MSG_TRACE_MAX_LEN)
+                                       start = end - MSG_TRACE_MAX_LEN;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
+                               /*
+                                * Pre-3.5 kernels use a ring log buffer whose
+                                * indices are reduced with (log_buf_len - 1)
+                                * masks; when the masked start lies after the
+                                * masked end the message wraps around, so it
+                                * is copied in two chunks.
+                                */
+                               if ((start & (log_buf_len - 1)) >
+                                       (end & (log_buf_len - 1))) {
+                                       memcpy(lmsg,
+                                               log_buf +
+                                               (start & (log_buf_len - 1)),
+                                               log_buf_len -
+                                               (start & (log_buf_len - 1)));
+                                       memcpy(lmsg + log_buf_len -
+                                               (start & (log_buf_len - 1)),
+                                               log_buf,
+                                               end & (log_buf_len - 1));
+                               } else
+                                       memcpy(lmsg,
+                                               log_buf +
+                                               (start & (log_buf_len - 1)),
+                                               end - start);
+#else
+                               /* 3.5+: log buffer indices are linear. */
+                               memcpy(lmsg, log_buf + start, end - start);
+#endif
+                               lmsg[end - start] = 0;
+                               lmsg;
+                       })
+               )
+       ),
+
+       TP_printk("%s", __get_str(msg))
+)
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/random.h b/instrumentation/events/lttng-module/random.h
new file mode 100644 (file)
index 0000000..6ecdd90
--- /dev/null
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM random
+
+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RANDOM_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Event class shared by mix_pool_bytes and mix_pool_bytes_nolock:
+ * records the pool name (string copied into the event), the number of
+ * bytes mixed in, and the caller's instruction pointer.
+ */
+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
+       TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+       TP_ARGS(pool_name, bytes, IP),
+
+       TP_STRUCT__entry(
+               __string(   pool_name,  pool_name               )
+               __field(          int,  bytes                   )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(pool_name, pool_name)
+               tp_assign(bytes, bytes)
+               tp_assign(IP, IP)
+       ),
+
+       TP_printk("%s pool: bytes %d caller %pF",
+                 __get_str(pool_name), __entry->bytes, (void *)__entry->IP)
+)
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+       TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+       TP_ARGS(pool_name, bytes, IP)
+)
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+       TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+       TP_ARGS(pool_name, bytes, IP)
+)
+
+/*
+ * Records an entropy credit to the named pool: the bits credited,
+ * the pool's entropy_count and entropy_total at that point, and the
+ * caller's instruction pointer.
+ */
+TRACE_EVENT(credit_entropy_bits,
+       TP_PROTO(const char *pool_name, int bits, int entropy_count,
+                int entropy_total, unsigned long IP),
+
+       TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+
+       TP_STRUCT__entry(
+               __string(   pool_name,  pool_name               )
+               __field(          int,  bits                    )
+               __field(          int,  entropy_count           )
+               __field(          int,  entropy_total           )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(pool_name, pool_name)
+               tp_assign(bits, bits)
+               tp_assign(entropy_count, entropy_count)
+               tp_assign(entropy_total, entropy_total)
+               tp_assign(IP, IP)
+       ),
+
+       TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
+                 "caller %pF", __get_str(pool_name), __entry->bits,
+                 __entry->entropy_count, __entry->entropy_total,
+                 (void *)__entry->IP)
+)
+
+/*
+ * Records a request for random bytes: the requested byte count and the
+ * caller's instruction pointer.
+ */
+TRACE_EVENT(get_random_bytes,
+       TP_PROTO(int nbytes, unsigned long IP),
+
+       TP_ARGS(nbytes, IP),
+
+       TP_STRUCT__entry(
+               __field(          int,  nbytes                  )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               tp_assign(nbytes, nbytes)
+               tp_assign(IP, IP)
+       ),
+
+       TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+)
+
+/*
+ * Event class shared by extract_entropy and extract_entropy_user:
+ * records the pool name, the number of bytes extracted, the pool's
+ * entropy_count, and the caller's instruction pointer.
+ */
+DECLARE_EVENT_CLASS(random__extract_entropy,
+       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+                unsigned long IP),
+
+       TP_ARGS(pool_name, nbytes, entropy_count, IP),
+
+       TP_STRUCT__entry(
+               __string(   pool_name,  pool_name               )
+               __field(          int,  nbytes                  )
+               __field(          int,  entropy_count           )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(pool_name, pool_name)
+               tp_assign(nbytes, nbytes)
+               tp_assign(entropy_count, entropy_count)
+               tp_assign(IP, IP)
+       ),
+
+       TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+                 __get_str(pool_name), __entry->nbytes, __entry->entropy_count,
+                 (void *)__entry->IP)
+)
+
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy,
+       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+                unsigned long IP),
+
+       TP_ARGS(pool_name, nbytes, entropy_count, IP)
+)
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+                unsigned long IP),
+
+       TP_ARGS(pool_name, nbytes, entropy_count, IP)
+)
+
+
+
+#endif /* _TRACE_RANDOM_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/rcu.h b/instrumentation/events/lttng-module/rcu.h
new file mode 100644 (file)
index 0000000..56434aa
--- /dev/null
@@ -0,0 +1,721 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rcu
+
+#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RCU_H
+
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+
+/*
+ * Tracepoint for start/end markers used for utilization calculations.
+ * By convention, the string is of the following forms:
+ *
+ * "Start <activity>" -- Mark the start of the specified activity,
+ *                      such as "context switch".  Nesting is permitted.
+ * "End <activity>" -- Mark the end of the specified activity.
+ *
+ * An "@" character within "<activity>" is a comment character: Data
+ * reduction scripts will ignore the "@" and the remainder of the line.
+ */
+TRACE_EVENT(rcu_utilization,
+
+       TP_PROTO(char *s),
+
+       TP_ARGS(s),
+
+       TP_STRUCT__entry(
+               __string(s, s)
+       ),
+
+       TP_fast_assign(
+               /* The marker string contents are copied into the event,
+                * so transient strings are safe to pass. */
+               tp_strcpy(s, s)
+       ),
+
+       TP_printk("%s", __get_str(s))
+)
+
+#ifdef CONFIG_RCU_TRACE
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+
+/*
+ * Tracepoint for grace-period events: starting and ending a grace
+ * period ("start" and "end", respectively), a CPU noting the start
+ * of a new grace period or the end of an old grace period ("cpustart"
+ * and "cpuend", respectively), a CPU passing through a quiescent
+ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
+ * and "cpuofl", respectively), and a CPU being kicked for being too
+ * long in dyntick-idle mode ("kick").
+ */
+TRACE_EVENT(rcu_grace_period,
+
+       /* rcuname and gpevent strings are copied into the trace buffer
+        * (__string/tp_strcpy), not recorded as pointers. */
+       TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+
+       TP_ARGS(rcuname, gpnum, gpevent),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(unsigned long, gpnum)
+               __string(gpevent, gpevent)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(gpnum, gpnum)
+               tp_strcpy(gpevent, gpevent)
+       ),
+
+       TP_printk("%s %lu %s",
+                 __get_str(rcuname), __entry->gpnum, __get_str(gpevent))
+)
+
+/*
+ * Tracepoint for grace-period-initialization events.  These are
+ * distinguished by the type of RCU, the new grace-period number, the
+ * rcu_node structure level, the starting and ending CPU covered by the
+ * rcu_node structure, and the mask of CPUs that will be waited for.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_grace_period_init,
+
+       /* Snapshot of one rcu_node: tree level, the CPU range it covers
+        * [grplo, grphi], and the qsmask of entities to wait for. */
+       TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+                int grplo, int grphi, unsigned long qsmask),
+
+       TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(unsigned long, gpnum)
+               __field(u8, level)
+               __field(int, grplo)
+               __field(int, grphi)
+               __field(unsigned long, qsmask)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(gpnum, gpnum)
+               tp_assign(level, level)
+               tp_assign(grplo, grplo)
+               tp_assign(grphi, grphi)
+               tp_assign(qsmask, qsmask)
+       ),
+
+       TP_printk("%s %lu %u %d %d %lx",
+                 __get_str(rcuname), __entry->gpnum, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->qsmask)
+)
+
+/*
+ * Tracepoint for tasks blocking within preemptible-RCU read-side
+ * critical sections.  Track the type of RCU (which one day might
+ * include SRCU), the grace-period number that the task is blocking
+ * (the current or the next), and the task's PID.
+ */
+TRACE_EVENT(rcu_preempt_task,
+
+       /* Records the RCU flavor name, the grace period being blocked,
+        * and the blocking task's PID. */
+       TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+
+       TP_ARGS(rcuname, pid, gpnum),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(unsigned long, gpnum)
+               __field(int, pid)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(gpnum, gpnum)
+               tp_assign(pid, pid)
+       ),
+
+       TP_printk("%s %lu %d",
+                 __get_str(rcuname), __entry->gpnum, __entry->pid)
+)
+
+/*
+ * Tracepoint for tasks that blocked within a given preemptible-RCU
+ * read-side critical section exiting that critical section.  Track the
+ * type of RCU (which one day might include SRCU) and the task's PID.
+ */
+TRACE_EVENT(rcu_unlock_preempted_task,
+
+       /* Counterpart of rcu_preempt_task: the previously-blocked task
+        * (pid) exits its read-side critical section. */
+       TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+
+       TP_ARGS(rcuname, gpnum, pid),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(unsigned long, gpnum)
+               __field(int, pid)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(gpnum, gpnum)
+               tp_assign(pid, pid)
+       ),
+
+       TP_printk("%s %lu %d", __get_str(rcuname), __entry->gpnum, __entry->pid)
+)
+
+/*
+ * Tracepoint for quiescent-state-reporting events.  These are
+ * distinguished by the type of RCU, the grace-period number, the
+ * mask of quiescent lower-level entities, the rcu_node structure level,
+ * the starting and ending CPU covered by the rcu_node structure, and
+ * whether there are any blocked tasks blocking the current grace period.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_quiescent_state_report,
+
+       TP_PROTO(char *rcuname, unsigned long gpnum,
+                unsigned long mask, unsigned long qsmask,
+                u8 level, int grplo, int grphi, int gp_tasks),
+
+       TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(unsigned long, gpnum)
+               __field(unsigned long, mask)
+               __field(unsigned long, qsmask)
+               __field(u8, level)
+               __field(int, grplo)
+               __field(int, grphi)
+               /* Narrowed from the int argument: effectively a boolean
+                * flag for blocked tasks (matches mainline's layout). */
+               __field(u8, gp_tasks)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(gpnum, gpnum)
+               tp_assign(mask, mask)
+               tp_assign(qsmask, qsmask)
+               tp_assign(level, level)
+               tp_assign(grplo, grplo)
+               tp_assign(grphi, grphi)
+               tp_assign(gp_tasks, gp_tasks)
+       ),
+
+       TP_printk("%s %lu %lx>%lx %u %d %d %u",
+                 __get_str(rcuname), __entry->gpnum,
+                 __entry->mask, __entry->qsmask, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->gp_tasks)
+)
+
+/*
+ * Tracepoint for quiescent states detected by force_quiescent_state().
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
+ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
+ * too long.
+ */
+TRACE_EVENT(rcu_fqs,
+
+       /* qsevent is a short string tag ("dti"/"ofl"/"kick" per the
+        * comment above), copied into the event. */
+       TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+
+       TP_ARGS(rcuname, gpnum, cpu, qsevent),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(unsigned long, gpnum)
+               __field(int, cpu)
+               __string(qsevent, qsevent)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(gpnum, gpnum)
+               tp_assign(cpu, cpu)
+               tp_strcpy(qsevent, qsevent)
+       ),
+
+       TP_printk("%s %lu %d %s",
+                 __get_str(rcuname), __entry->gpnum,
+                 __entry->cpu, __get_str(qsevent))
+)
+
+#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+
+/*
+ * Tracepoint for dyntick-idle entry/exit events.  These take a string
+ * as argument: "Start" for entering dyntick-idle mode, "End" for
+ * leaving it, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.  "Error on entry: not idle task" and "Error on
+ * exit: not idle task" indicate that a non-idle task is erroneously
+ * toying with the idle loop.
+ *
+ * These events also take a pair of numbers, which indicate the nesting
+ * depth before and after the event of interest.  Note that task-related
+ * events use the upper bits of each number, while interrupt-related
+ * events use the lower bits.
+ */
+TRACE_EVENT(rcu_dyntick,
+
+/*
+ * Event layout varies with kernel version: 3.3+ also records the
+ * nesting depth before and after the transition; older kernels record
+ * only the polarity string.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
+
+       TP_ARGS(polarity, oldnesting, newnesting),
+#else
+       TP_PROTO(char *polarity),
+
+       TP_ARGS(polarity),
+#endif
+
+       TP_STRUCT__entry(
+               __string(polarity, polarity)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               __field(long long, oldnesting)
+               __field(long long, newnesting)
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(polarity, polarity)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               tp_assign(oldnesting, oldnesting)
+               tp_assign(newnesting, newnesting)
+#endif
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       TP_printk("%s %llx %llx", __get_str(polarity),
+                 __entry->oldnesting, __entry->newnesting)
+#else
+       TP_printk("%s", __get_str(polarity))
+#endif
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+/*
+ * Tracepoint for RCU preparation for idle, the goal being to get RCU
+ * processing done so that the current CPU can shut off its scheduling
+ * clock and enter dyntick-idle mode.  One way to accomplish this is
+ * to drain all RCU callbacks from this CPU, and the other is to have
+ * done everything RCU requires for the current grace period.  In this
+ * latter case, the CPU will be awakened at the end of the current grace
+ * period in order to process the remainder of its callbacks.
+ *
+ * These tracepoints take a string as argument:
+ *
+ *     "No callbacks": Nothing to do, no callbacks on this CPU.
+ *     "In holdoff": Nothing to do, holding off after unsuccessful attempt.
+ *     "Begin holdoff": Attempt failed, don't retry until next jiffy.
+ *     "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *     "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
+ *     "More callbacks": Still more callbacks, try again to clear them out.
+ *     "Callbacks drained": All callbacks processed, off to dyntick idle!
+ *     "Timer": Timer fired to cause CPU to continue processing callbacks.
+ *     "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
+ *     "Cleanup after idle": Idle exited, timer canceled.
+ */
+TRACE_EVENT(rcu_prep_idle,
+
+       /* reason is one of the fixed strings listed in the comment
+        * above; its contents are copied into the event. */
+       TP_PROTO(char *reason),
+
+       TP_ARGS(reason),
+
+       TP_STRUCT__entry(
+               __string(reason, reason)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(reason, reason)
+       ),
+
+       TP_printk("%s", __get_str(reason))
+)
+#endif
+
+/*
+ * Tracepoint for the registration of a single RCU callback function.
+ * The first argument is the type of RCU, the second argument is
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
+ */
+TRACE_EVENT(rcu_callback,
+
+/* Kernels 3.4+ also pass/record the lazy-callback queue length. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+                long qlen),
+
+       TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+#else
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+
+       TP_ARGS(rcuname, rhp, qlen),
+#endif
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(void *, rhp)
+               __field(void *, func)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+               __field(long, qlen_lazy)
+#endif
+               __field(long, qlen)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(rhp, rhp)
+               /* The callback function pointer is read out of rhp at
+                * registration time. */
+               tp_assign(func, rhp->func)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+               tp_assign(qlen_lazy, qlen_lazy)
+#endif
+               tp_assign(qlen, qlen)
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+       TP_printk("%s rhp=%p func=%pf %ld/%ld",
+                 __get_str(rcuname), __entry->rhp, __entry->func,
+                 __entry->qlen_lazy, __entry->qlen)
+#else
+       TP_printk("%s rhp=%p func=%pf %ld",
+                 __get_str(rcuname), __entry->rhp, __entry->func,
+                 __entry->qlen)
+#endif
+)
+
+/*
+ * Tracepoint for the registration of a single RCU callback of the special
+ * kfree() form.  The first argument is the RCU type, the second argument
+ * is a pointer to the RCU callback, the third argument is the offset
+ * of the callback within the enclosing RCU-protected data structure,
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
+ */
+TRACE_EVENT(rcu_kfree_callback,
+
+/* Kernels 3.4+ also pass/record the lazy-callback queue length. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+                long qlen_lazy, long qlen),
+
+       TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+#else
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+                long qlen),
+
+       TP_ARGS(rcuname, rhp, offset, qlen),
+#endif
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(void *, rhp)
+               /* offset of the rcu_head within the enclosing object,
+                * used in place of a function pointer for kfree-style
+                * callbacks (printed under "func="). */
+               __field(unsigned long, offset)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+               __field(long, qlen_lazy)
+#endif
+               __field(long, qlen)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(rhp, rhp)
+               tp_assign(offset, offset)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+               tp_assign(qlen_lazy, qlen_lazy)
+#endif
+               tp_assign(qlen, qlen)
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+       TP_printk("%s rhp=%p func=%ld %ld/%ld",
+                 __get_str(rcuname), __entry->rhp, __entry->offset,
+                 __entry->qlen_lazy, __entry->qlen)
+#else
+       TP_printk("%s rhp=%p func=%ld %ld",
+                 __get_str(rcuname), __entry->rhp, __entry->offset,
+                 __entry->qlen)
+#endif
+)
+
+/*
+ * Tracepoint for marking the beginning rcu_do_batch, performed to start
+ * RCU callback invocation.  The first argument is the RCU flavor,
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
+ */
+TRACE_EVENT(rcu_batch_start,
+
+/* Kernels 3.4+ also pass/record the lazy-callback queue length. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+       TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
+
+       TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+#else
+       TP_PROTO(char *rcuname, long qlen, int blimit),
+
+       TP_ARGS(rcuname, qlen, blimit),
+#endif
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+               __field(long, qlen_lazy)
+#endif
+               __field(long, qlen)
+               __field(int, blimit)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+               tp_assign(qlen_lazy, qlen_lazy)
+#endif
+               tp_assign(qlen, qlen)
+               tp_assign(blimit, blimit)
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+       TP_printk("%s CBs=%ld/%ld bl=%d",
+                 __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
+                 __entry->blimit)
+#else
+       TP_printk("%s CBs=%ld bl=%d",
+                 __get_str(rcuname), __entry->qlen, __entry->blimit)
+#endif
+)
+
+/*
+ * Tracepoint for the invocation of a single RCU callback function.
+ * The first argument is the type of RCU, and the second argument is
+ * a pointer to the RCU callback itself.
+ */
+TRACE_EVENT(rcu_invoke_callback,
+
+       /* Records the callback pointer and its function at the moment
+        * it is invoked. */
+       TP_PROTO(char *rcuname, struct rcu_head *rhp),
+
+       TP_ARGS(rcuname, rhp),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(void *, rhp)
+               __field(void *, func)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(rhp, rhp)
+               tp_assign(func, rhp->func)
+       ),
+
+       TP_printk("%s rhp=%p func=%pf",
+                 __get_str(rcuname), __entry->rhp, __entry->func)
+)
+
+/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree() form.  The first argument is the RCU flavor, the second
+ * argument is a pointer to the RCU callback, and the third argument
+ * is the offset of the callback within the enclosing RCU-protected
+ * data structure.
+ */
+TRACE_EVENT(rcu_invoke_kfree_callback,
+
+       /* Like rcu_invoke_callback but for kfree-style callbacks: an
+        * offset within the enclosing object replaces the function
+        * pointer (printed under "func="). */
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+
+       TP_ARGS(rcuname, rhp, offset),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(void *, rhp)
+               __field(unsigned long, offset)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(rhp, rhp)
+               tp_assign(offset, offset)
+       ),
+
+       TP_printk("%s rhp=%p func=%ld",
+                 __get_str(rcuname), __entry->rhp, __entry->offset)
+)
+
+/*
+ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
+ * invoked.  The first argument is the name of the RCU flavor,
+ * the second argument is number of callbacks actually invoked,
+ * the third argument (cb) is whether or not any of the callbacks that
+ * were ready to invoke at the beginning of this batch are still
+ * queued, the fourth argument (nr) is the return value of need_resched(),
+ * the fifth argument (iit) is 1 if the current task is the idle task,
+ * and the sixth argument (risk) is the return value from
+ * rcu_is_callbacks_kthread().
+ */
+TRACE_EVENT(rcu_batch_end,
+
+/* Kernels 3.3+ also record the four status booleans (cb/nr/iit/risk)
+ * described in the comment above; older kernels record only the count. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       TP_PROTO(char *rcuname, int callbacks_invoked,
+                bool cb, bool nr, bool iit, bool risk),
+
+       TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
+#else
+       TP_PROTO(char *rcuname, int callbacks_invoked),
+
+       TP_ARGS(rcuname, callbacks_invoked),
+#endif
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __field(int, callbacks_invoked)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               __field(bool, cb)
+               __field(bool, nr)
+               __field(bool, iit)
+               __field(bool, risk)
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_assign(callbacks_invoked, callbacks_invoked)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               tp_assign(cb, cb)
+               tp_assign(nr, nr)
+               tp_assign(iit, iit)
+               tp_assign(risk, risk)
+#endif
+       ),
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
+                 __get_str(rcuname), __entry->callbacks_invoked,
+                 __entry->cb ? 'C' : '.',
+                 __entry->nr ? 'S' : '.',
+                 __entry->iit ? 'I' : '.',
+                 __entry->risk ? 'R' : '.')
+#else
+       TP_printk("%s CBs-invoked=%d",
+                 __get_str(rcuname), __entry->callbacks_invoked)
+#endif
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+/*
+ * Tracepoint for rcutorture readers.  The first argument is the name
+ * of the RCU flavor from rcutorture's viewpoint and the second argument
+ * is the callback address.
+ * (The upstream tracepoint only exists on kernels >= 3.3.0.)
+ */
+TRACE_EVENT(rcu_torture_read,
+
+       TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+
+       TP_ARGS(rcutorturename, rhp),
+
+       TP_STRUCT__entry(
+               __string(rcutorturename, rcutorturename)
+               __field(struct rcu_head *, rhp)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcutorturename, rcutorturename)
+               tp_assign(rhp, rhp)
+       ),
+
+       TP_printk("%s torture read %p",
+                 __get_str(rcutorturename), __entry->rhp)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+/*
+ * Tracepoint for _rcu_barrier() execution.  The string "s" describes
+ * the _rcu_barrier phase:
+ *     "Begin": rcu_barrier_callback() started.
+ *     "Check": rcu_barrier_callback() checking for piggybacking.
+ *     "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
+ *     "Inc1": rcu_barrier_callback() piggyback check counter incremented.
+ *     "Offline": rcu_barrier_callback() found offline CPU
+ *     "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
+ *     "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
+ *     "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *     "CB": An rcu_barrier_callback() invoked a callback, not the last.
+ *     "LastCB": An rcu_barrier_callback() invoked the last callback.
+ *     "Inc2": rcu_barrier_callback() piggyback check counter incremented.
+ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
+ * is the count of remaining callbacks, and "done" is the piggybacking count.
+ * (The upstream tracepoint only exists on kernels >= 3.6.0.)
+ */
+TRACE_EVENT(rcu_barrier,
+
+       TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+
+       TP_ARGS(rcuname, s, cpu, cnt, done),
+
+       TP_STRUCT__entry(
+               __string(rcuname, rcuname)
+               __string(s, s)
+               __field(int, cpu)
+               __field(int, cnt)
+               __field(unsigned long, done)
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(rcuname, rcuname)
+               tp_strcpy(s, s)
+               tp_assign(cpu, cpu)
+               tp_assign(cnt, cnt)
+               tp_assign(done, done)
+       ),
+
+       TP_printk("%s %s cpu %d remaining %d # %lu",
+                 __get_str(rcuname), __get_str(s), __entry->cpu, __entry->cnt,
+                 __entry->done)
+)
+#endif
+
+#else /* #ifdef CONFIG_RCU_TRACE */
+
+/*
+ * With CONFIG_RCU_TRACE disabled, every trace_rcu_*() call compiles to a
+ * no-op.  The dummy argument lists below must track the TRACE_EVENT
+ * signatures above, including their kernel-version-dependent variants.
+ */
+#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+                                   qsmask) do { } while (0)
+#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+                                        grplo, grphi, gp_tasks) do { } \
+       while (0)
+#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#else
+#define trace_rcu_dyntick(polarity) do { } while (0)
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#define trace_rcu_prep_idle(reason) do { } while (0)
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
+       do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
+       do { } while (0)
+#else
+#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#endif
+#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
+#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
+       do { } while (0)
+#else
+#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
+#endif
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#endif /* _TRACE_RCU_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/regmap.h b/instrumentation/events/lttng-module/regmap.h
new file mode 100644 (file)
index 0000000..df56ac0
--- /dev/null
@@ -0,0 +1,188 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+
+#ifndef _TRACE_REGMAP_DEF_
+#define _TRACE_REGMAP_DEF_
+struct device;
+struct regmap;
+#endif
+
+/*
+ * Log register events
+ */
+/*
+ * Event class for single-register accesses: records the device name,
+ * the register address and the value transferred.
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        unsigned int,   reg             )
+               __field(        unsigned int,   val             )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(name, dev_name(dev))
+               tp_assign(reg, reg)
+               tp_assign(val, val)
+       ),
+
+       TP_printk("%s reg=%x val=%x", __get_str(name),
+                 (unsigned int)__entry->reg,
+                 (unsigned int)__entry->val)
+)
+
+/*
+ * Single-register write/read events; regmap_reg_read_cache was added
+ * upstream in 3.3.0, hence the version guard.
+ */
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val)
+
+)
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val)
+
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val)
+
+)
+#endif
+
+/*
+ * Event class for block (multi-register) hardware accesses: records the
+ * device name, the starting register and the register count.
+ */
+DECLARE_EVENT_CLASS(regmap_block,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        unsigned int,   reg             )
+               __field(        int,            count           )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(name, dev_name(dev))
+               tp_assign(reg, reg)
+               tp_assign(count, count)
+       ),
+
+       TP_printk("%s reg=%x count=%d", __get_str(name),
+                 (unsigned int)__entry->reg,
+                 (int)__entry->count)
+)
+
+/* Start/done pairs bracketing each hardware read/write transfer. */
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+)
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+)
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+)
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+)
+
+/*
+ * Records a register-cache sync pass: device name plus the caller-supplied
+ * free-form type and status strings.
+ */
+TRACE_EVENT(regcache_sync,
+
+       TP_PROTO(struct device *dev, const char *type,
+                const char *status),
+
+       TP_ARGS(dev, type, status),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __string(       status,         status          )
+               __string(       type,           type            )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(name, dev_name(dev))
+               tp_strcpy(status, status)
+               tp_strcpy(type, type)
+       ),
+
+       TP_printk("%s type=%s status=%s", __get_str(name),
+                 __get_str(type), __get_str(status))
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+/*
+ * Event class for cache-mode toggles (cache_only / cache_bypass); the bool
+ * flag is stored as an int in the event payload.  Added upstream in 3.4.0.
+ */
+DECLARE_EVENT_CLASS(regmap_bool,
+
+       TP_PROTO(struct device *dev, bool flag),
+
+       TP_ARGS(dev, flag),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        int,            flag            )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(name, dev_name(dev))
+               tp_assign(flag, flag)
+       ),
+
+       TP_printk("%s flag=%d", __get_str(name),
+                 (int)__entry->flag)
+)
+
+DEFINE_EVENT(regmap_bool, regmap_cache_only,
+
+       TP_PROTO(struct device *dev, bool flag),
+
+       TP_ARGS(dev, flag)
+
+)
+
+DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
+
+       TP_PROTO(struct device *dev, bool flag),
+
+       TP_ARGS(dev, flag)
+
+)
+#endif
+
+#endif /* _TRACE_REGMAP_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/rpm.h b/instrumentation/events/lttng-module/rpm.h
new file mode 100644 (file)
index 0000000..16bcad0
--- /dev/null
@@ -0,0 +1,101 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+#ifndef _TRACE_RPM_DEF_
+#define _TRACE_RPM_DEF_
+struct device;
+#endif
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+/*
+ * Snapshots the device's runtime-PM bookkeeping (dev->power.*) at the
+ * instrumentation point, alongside the rpm_* call's flags argument.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        int,            flags           )
+               __field(        int ,           usage_count     )
+               __field(        int ,           disable_depth   )
+               __field(        int ,           runtime_auto    )
+               __field(        int ,           request_pending )
+               __field(        int ,           irq_safe        )
+               __field(        int ,           child_count     )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(name, dev_name(dev))
+               tp_assign(flags, flags)
+               tp_assign(usage_count, atomic_read(&dev->power.usage_count))
+               tp_assign(disable_depth, dev->power.disable_depth)
+               tp_assign(runtime_auto, dev->power.runtime_auto)
+               tp_assign(request_pending, dev->power.request_pending)
+               tp_assign(irq_safe, dev->power.irq_safe)
+               tp_assign(child_count, atomic_read(&dev->power.child_count))
+       ),
+
+       TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+                       " irq-%-1d child-%d",
+                       __get_str(name), __entry->flags,
+                       __entry->usage_count,
+                       __entry->disable_depth,
+                       __entry->runtime_auto,
+                       __entry->request_pending,
+                       __entry->irq_safe,
+                       __entry->child_count
+                )
+)
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+)
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+)
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+)
+
+/*
+ * Logs the return value of a runtime-PM call together with its call site
+ * (ip, printed as a symbol via %pS).
+ */
+TRACE_EVENT(rpm_return_int,
+       TP_PROTO(struct device *dev, unsigned long ip, int ret),
+       TP_ARGS(dev, ip, ret),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev))
+               __field(        unsigned long,          ip      )
+               __field(        int,                    ret     )
+       ),
+
+       TP_fast_assign(
+               tp_strcpy(name, dev_name(dev))
+               tp_assign(ip, ip)
+               tp_assign(ret, ret)
+       ),
+
+       TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+               __entry->ret)
+)
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/sunrpc.h b/instrumentation/events/lttng-module/sunrpc.h
new file mode 100644 (file)
index 0000000..6320668
--- /dev/null
@@ -0,0 +1,177 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunrpc
+
+#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNRPC_H
+
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Event class capturing an rpc_task pointer, its client, and the task's
+ * current tk_status at the instrumentation point.
+ */
+DECLARE_EVENT_CLASS(rpc_task_status,
+
+       TP_PROTO(struct rpc_task *task),
+
+       TP_ARGS(task),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_task *, task)
+               __field(const struct rpc_clnt *, clnt)
+               __field(int, status)
+       ),
+
+       TP_fast_assign(
+               tp_assign(task, task)
+               tp_assign(clnt, task->tk_client)
+               tp_assign(status, task->tk_status)
+       ),
+
+       TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
+)
+
+DEFINE_EVENT(rpc_task_status, rpc_call_status,
+       TP_PROTO(struct rpc_task *task),
+
+       TP_ARGS(task)
+)
+
+DEFINE_EVENT(rpc_task_status, rpc_bind_status,
+       TP_PROTO(struct rpc_task *task),
+
+       TP_ARGS(task)
+)
+
+/*
+ * Like rpc_task_status, but records the explicit status argument rather
+ * than task->tk_status.
+ */
+TRACE_EVENT(rpc_connect_status,
+       TP_PROTO(struct rpc_task *task, int status),
+
+       TP_ARGS(task, status),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_task *, task)
+               __field(const struct rpc_clnt *, clnt)
+               __field(int, status)
+       ),
+
+       TP_fast_assign(
+               tp_assign(task, task)
+               tp_assign(clnt, task->tk_client)
+               tp_assign(status, status)
+       ),
+
+       TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
+)
+
+/*
+ * Event class for task execution events (begin / run_action / complete):
+ * records the action callback pointer along with the task's tk_runstate,
+ * tk_status and tk_flags.
+ */
+DECLARE_EVENT_CLASS(rpc_task_running,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_clnt *, clnt)
+               __field(const struct rpc_task *, task)
+               __field(const void *, action)
+               __field(unsigned long, runstate)
+               __field(int, status)
+               __field(unsigned short, flags)
+               ),
+
+       TP_fast_assign(
+               tp_assign(clnt, clnt)
+               tp_assign(task, task)
+               tp_assign(action, action)
+               tp_assign(runstate, task->tk_runstate)
+               tp_assign(status, task->tk_status)
+               tp_assign(flags, task->tk_flags)
+               ),
+
+       TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d action=%pf",
+               __entry->task,
+               __entry->clnt,
+               __entry->flags,
+               __entry->runstate,
+               __entry->status,
+               __entry->action
+               )
+)
+
+DEFINE_EVENT(rpc_task_running, rpc_task_begin,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action)
+
+)
+
+DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action)
+
+)
+
+DEFINE_EVENT(rpc_task_running, rpc_task_complete,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action)
+
+)
+
+/*
+ * Event class for wait-queue transitions (sleep / wakeup): records the
+ * queue name and the task's timeout, runstate, status and flags.
+ */
+DECLARE_EVENT_CLASS(rpc_task_queued,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+       TP_ARGS(clnt, task, q),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_clnt *, clnt)
+               __field(const struct rpc_task *, task)
+               __field(unsigned long, timeout)
+               __field(unsigned long, runstate)
+               __field(int, status)
+               __field(unsigned short, flags)
+               __string(q_name, rpc_qname(q))
+               ),
+
+       TP_fast_assign(
+               tp_assign(clnt, clnt)
+               tp_assign(task, task)
+               tp_assign(timeout, task->tk_timeout)
+               tp_assign(runstate, task->tk_runstate)
+               tp_assign(status, task->tk_status)
+               tp_assign(flags, task->tk_flags)
+               tp_strcpy(q_name, rpc_qname(q))
+               ),
+
+       TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+               __entry->task,
+               __entry->clnt,
+               __entry->flags,
+               __entry->runstate,
+               __entry->status,
+               __entry->timeout,
+               __get_str(q_name)
+               )
+)
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+       TP_ARGS(clnt, task, q)
+
+)
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+       TP_ARGS(clnt, task, q)
+
+)
+
+#endif /* _TRACE_SUNRPC_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/workqueue.h b/instrumentation/events/lttng-module/workqueue.h
new file mode 100644 (file)
index 0000000..02115c7
--- /dev/null
@@ -0,0 +1,316 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+
+#ifndef _TRACE_WORKQUEUE_DEF_
+#define _TRACE_WORKQUEUE_DEF_
+
+struct worker;
+struct global_cwq;
+
+/* Have to duplicate all these definitions from kernel/workqueue.c */
+/* Extracts only */
+/* NOTE(review): these mirror private kernel structs; field layout must
+ * match the running kernel exactly — re-check on every kernel bump. */
+
+enum {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+       NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
+#endif
+       BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
+       BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+struct worker_pool {
+       struct global_cwq       *gcwq;          /* I: the owning gcwq */
+       unsigned int            flags;          /* X: flags */
+
+       struct list_head        worklist;       /* L: list of pending works */
+       int                     nr_workers;     /* L: total number of workers */
+       int                     nr_idle;        /* L: currently idle ones */
+
+       struct list_head        idle_list;      /* X: list of idle workers */
+       struct timer_list       idle_timer;     /* L: worker idle timeout */
+       struct timer_list       mayday_timer;   /* L: SOS timer for workers */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
+#else
+       struct mutex            manager_mutex;  /* mutex manager should hold */
+#endif
+       struct ida              worker_ida;     /* L: for worker IDs */
+};
+
+struct global_cwq {
+       spinlock_t              lock;           /* the gcwq lock */
+       unsigned int            cpu;            /* I: the associated cpu */
+       unsigned int            flags;          /* L: GCWQ_* flags */
+
+       /* workers are chained either in busy_hash or pool idle_list */
+       struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
+                                               /* L: hash of busy workers */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+       struct worker_pool      pools[NR_WORKER_POOLS];
+                                               /* normal and highpri pools */
+#else
+       struct worker_pool      pools[2];       /* normal and highpri pools */
+
+       wait_queue_head_t       rebind_hold;    /* rebind hold wait */
+#endif
+} ____cacheline_aligned_in_smp;
+
+#else
+
+struct global_cwq {
+       spinlock_t              lock;           /* the gcwq lock */
+       struct list_head        worklist;       /* L: list of pending works */
+       unsigned int            cpu;            /* I: the associated cpu */
+       unsigned int            flags;          /* L: GCWQ_* flags */
+
+       int                     nr_workers;     /* L: total number of workers */
+       int                     nr_idle;        /* L: currently idle ones */
+
+       /* workers are chained either in the idle_list or busy_hash */
+       struct list_head        idle_list;      /* X: list of idle workers */
+       struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
+                                               /* L: hash of busy workers */
+
+       struct timer_list       idle_timer;     /* L: worker idle timeout */
+       struct timer_list       mayday_timer;   /* L: SOS timer for workers */
+
+       struct ida              worker_ida;     /* L: for worker IDs */
+
+       struct task_struct      *trustee;       /* L: for gcwq shutdown */
+       unsigned int            trustee_state;  /* L: trustee state */
+       wait_queue_head_t       trustee_wait;   /* trustee wait */
+       struct worker           *first_idle;    /* L: first idle worker */
+} ____cacheline_aligned_in_smp;
+
+#endif
+
+struct cpu_workqueue_struct {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+       struct worker_pool      *pool;          /* I: The associated pool */
+#else
+       struct global_cwq       *gcwq;          /* I: the associated gcwq */
+#endif
+       struct workqueue_struct *wq;            /* I: the owning workqueue */
+       int                     work_color;     /* L: current color */
+       int                     flush_color;    /* L: flushing color */
+       int                     nr_in_flight[WORK_NR_COLORS];
+                                               /* L: nr of in_flight works */
+       int                     nr_active;      /* L: nr of active works */
+       int                     max_active;     /* L: max active works */
+       struct list_head        delayed_works;  /* L: delayed works */
+};
+
+#endif
+
+/* Event class recording just the work_struct pointer. */
+DECLARE_EVENT_CLASS(workqueue_work,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(work, work)
+       ),
+
+       TP_printk("work struct %p", __entry->work)
+)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+/**
+ * workqueue_queue_work - called when a work gets queued
+ * @req_cpu:   the requested cpu
+ * @cwq:       pointer to struct cpu_workqueue_struct
+ * @work:      pointer to struct work_struct
+ *
+ * This event occurs when a work is queued immediately or once a
+ * delayed work is actually queued on a workqueue (ie: once the delay
+ * has been reached).
+ */
+TRACE_EVENT(workqueue_queue_work,
+
+       TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
+                struct work_struct *work),
+
+       TP_ARGS(req_cpu, cwq, work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+               __field( void *,        function)
+               __field( void *,        workqueue)
+               __field( unsigned int,  req_cpu )
+               __field( unsigned int,  cpu     )
+       ),
+
+       TP_fast_assign(
+               tp_assign(work, work)
+               tp_assign(function, work->func)
+               tp_assign(workqueue, cwq->wq)
+               tp_assign(req_cpu, req_cpu)
+               /* the cwq->gcwq link moved behind cwq->pool in 3.6.0 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+               tp_assign(cpu, cwq->pool->gcwq->cpu)
+#else
+               tp_assign(cpu, cwq->gcwq->cpu)
+#endif
+       ),
+
+       TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
+                 __entry->work, __entry->function, __entry->workqueue,
+                 __entry->req_cpu, __entry->cpu)
+)
+
+/**
+ * workqueue_activate_work - called when a work gets activated
+ * @work:      pointer to struct work_struct
+ *
+ * This event occurs when a queued work is put on the active queue,
+ * which happens immediately after queueing unless @max_active limit
+ * is reached.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work)
+)
+#endif
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work:      pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.  Records both the work struct and
+ * its callback (symbolized via %pf in the printk format).
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+               __field( void *,        function)
+       ),
+
+       TP_fast_assign(
+               tp_assign(work, work)
+               tp_assign(function, work->func)
+       ),
+
+       TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+)
+
+/**
+ * workqueue_execute_end - called immediately after the workqueue callback
+ * @work:      pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work)
+)
+
+#else
+
+/*
+ * Pre-2.6.37 workqueue events: works are identified by the workqueue
+ * thread's comm/pid rather than by cpu_workqueue/gcwq pointers.
+ */
+DECLARE_EVENT_CLASS(workqueue,
+
+       TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+       TP_ARGS(wq_thread, work),
+
+       TP_STRUCT__entry(
+               __array(char,           thread_comm,    TASK_COMM_LEN)
+               __field(pid_t,          thread_pid)
+               __field(work_func_t,    func)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
+               tp_assign(thread_pid, wq_thread->pid)
+               tp_assign(func, work->func)
+       ),
+
+       TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
+               __entry->thread_pid, __entry->func)
+)
+
+DEFINE_EVENT(workqueue, workqueue_insertion,
+
+       TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+       TP_ARGS(wq_thread, work)
+)
+
+DEFINE_EVENT(workqueue, workqueue_execution,
+
+       TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+       TP_ARGS(wq_thread, work)
+)
+
+/* Trace the creation of one workqueue thread on a cpu */
+TRACE_EVENT(workqueue_creation,
+
+       TP_PROTO(struct task_struct *wq_thread, int cpu),
+
+       TP_ARGS(wq_thread, cpu),
+
+       TP_STRUCT__entry(
+               __array(char,   thread_comm,    TASK_COMM_LEN)
+               __field(pid_t,  thread_pid)
+               __field(int,    cpu)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
+               tp_assign(thread_pid, wq_thread->pid)
+               tp_assign(cpu, cpu)
+       ),
+
+       TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
+               __entry->thread_pid, __entry->cpu)
+)
+
+/* Trace the destruction of a workqueue thread */
+TRACE_EVENT(workqueue_destruction,
+
+       TP_PROTO(struct task_struct *wq_thread),
+
+       TP_ARGS(wq_thread),
+
+       TP_STRUCT__entry(
+               __array(char,   thread_comm,    TASK_COMM_LEN)
+               __field(pid_t,  thread_pid)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
+               tp_assign(thread_pid, wq_thread->pid)
+       ),
+
+       TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
+)
+
+#endif
+
+#endif /*  _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/lttng-module/writeback.h b/instrumentation/events/lttng-module/writeback.h
new file mode 100644 (file)
index 0000000..b3d5538
--- /dev/null
@@ -0,0 +1,673 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM writeback
+
+#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WRITEBACK_H
+
+#include <linux/backing-dev.h>
+#include <linux/writeback.h>
+#include <linux/version.h>
+
+#ifndef _TRACE_WRITEBACK_DEF_
+#define _TRACE_WRITEBACK_DEF_
+/* Have to duplicate it here from fs/fs-writeback.c */
+struct wb_writeback_work {
+       long nr_pages;                  /* number of pages to write back */
+       struct super_block *sb;         /* may be NULL: not pinned to one sb */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+       unsigned long *older_than_this; /* dirtied_when cutoff, in jiffies */
+#endif
+       enum writeback_sync_modes sync_mode;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+       unsigned int tagged_writepages:1;
+#endif
+       unsigned int for_kupdate:1;
+       unsigned int range_cyclic:1;
+       unsigned int for_background:1;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       enum wb_reason reason;          /* why was writeback initiated? */
+#endif
+
+       struct list_head list;          /* pending work list */
+       struct completion *done;        /* set if the caller waits */
+};
+/* Resolve the bdi an inode writes back through (dup of fs/fs-writeback.c). */
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       /* Block-device inodes carry their own bdi on the mapping. */
+       return strcmp(sb->s_type->name, "bdev") == 0 ?
+               inode->i_mapping->backing_dev_info : sb->s_bdi;
+}
+#endif
+
+#define show_inode_state(state) /* render i_state bits as "A|B|..." */ \
+       __print_flags(state, "|",                               \
+               {I_DIRTY_SYNC,          "I_DIRTY_SYNC"},        \
+               {I_DIRTY_DATASYNC,      "I_DIRTY_DATASYNC"},    \
+               {I_DIRTY_PAGES,         "I_DIRTY_PAGES"},       \
+               {I_NEW,                 "I_NEW"},               \
+               {I_WILL_FREE,           "I_WILL_FREE"},         \
+               {I_FREEING,             "I_FREEING"},           \
+               {I_CLEAR,               "I_CLEAR"},             \
+               {I_SYNC,                "I_SYNC"},              \
+               {I_REFERENCED,          "I_REFERENCED"}         \
+       )
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#define WB_WORK_REASON /* enum wb_reason -> name, for __print_symbolic */ \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+#endif
+
+DECLARE_EVENT_CLASS(writeback_work_class, /* common payload of wb work events */
+       TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
+       TP_ARGS(bdi, work),
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(long, nr_pages)
+               __field(dev_t, sb_dev)
+               __field(int, sync_mode)
+               __field(int, for_kupdate)
+               __field(int, range_cyclic)
+               __field(int, for_background)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               __field(int, reason)
+#endif
+       ),
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(bdi->dev ? bdi->dev :
+                               default_backing_dev_info.dev), 32) /* bdi->dev may be NULL */
+               tp_assign(nr_pages, work->nr_pages)
+               tp_assign(sb_dev, work->sb ? work->sb->s_dev : 0)
+               tp_assign(sync_mode, work->sync_mode)
+               tp_assign(for_kupdate, work->for_kupdate)
+               tp_assign(range_cyclic, work->range_cyclic)
+               tp_assign(for_background, work->for_background)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               tp_assign(reason, work->reason)
+#endif
+       ),
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
+                 "kupdate=%d range_cyclic=%d background=%d reason=%s",
+                 __entry->name,
+                 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
+                 __entry->nr_pages,
+                 __entry->sync_mode,
+                 __entry->for_kupdate,
+                 __entry->range_cyclic,
+                 __entry->for_background,
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
+#else
+       TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
+                 "kupdate=%d range_cyclic=%d background=%d",
+                 __entry->name,
+                 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
+                 __entry->nr_pages,
+                 __entry->sync_mode,
+                 __entry->for_kupdate,
+                 __entry->range_cyclic,
+                 __entry->for_background
+       )
+#endif
+)
+#define DEFINE_WRITEBACK_WORK_EVENT(name) /* event from the class above */ \
+DEFINE_EVENT(writeback_work_class, name, \
+       TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
+       TP_ARGS(bdi, work))
+DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread)
+DEFINE_WRITEBACK_WORK_EVENT(writeback_queue)
+DEFINE_WRITEBACK_WORK_EVENT(writeback_exec)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+DEFINE_WRITEBACK_WORK_EVENT(writeback_start)
+DEFINE_WRITEBACK_WORK_EVENT(writeback_written)
+DEFINE_WRITEBACK_WORK_EVENT(writeback_wait)
+#endif
+
+TRACE_EVENT(writeback_pages_written, /* records a pages_written count */
+       TP_PROTO(long pages_written),
+       TP_ARGS(pages_written),
+       TP_STRUCT__entry(
+               __field(long,           pages)  /* copied from pages_written */
+       ),
+       TP_fast_assign(
+               tp_assign(pages, pages_written)
+       ),
+       TP_printk("%ld", __entry->pages)
+)
+
+DECLARE_EVENT_CLASS(writeback_class, /* events that record only the bdi name */
+       TP_PROTO(struct backing_dev_info *bdi),
+       TP_ARGS(bdi),
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+       ),
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(bdi->dev), 32)
+       ),
+       TP_printk("bdi %s",
+                 __entry->name
+       )
+)
+#define DEFINE_WRITEBACK_EVENT(name) /* bdi-only event from the class above */ \
+DEFINE_EVENT(writeback_class, name, \
+       TP_PROTO(struct backing_dev_info *bdi), \
+       TP_ARGS(bdi))
+
+DEFINE_WRITEBACK_EVENT(writeback_nowork)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+DEFINE_WRITEBACK_EVENT(writeback_wake_background)
+#endif
+DEFINE_WRITEBACK_EVENT(writeback_wake_thread)
+DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread)
+DEFINE_WRITEBACK_EVENT(writeback_bdi_register)
+DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister)
+DEFINE_WRITEBACK_EVENT(writeback_thread_start)
+DEFINE_WRITEBACK_EVENT(writeback_thread_stop)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) && LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) /* [3.1, 3.2) only */
+DEFINE_WRITEBACK_EVENT(balance_dirty_start)
+DEFINE_WRITEBACK_EVENT(balance_dirty_wait)
+
+TRACE_EVENT(balance_dirty_written,
+
+       TP_PROTO(struct backing_dev_info *bdi, int written),
+
+       TP_ARGS(bdi, written),
+
+       TP_STRUCT__entry(
+               __array(char,   name, 32)
+               __field(int,    written)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(bdi->dev), 32)
+               tp_assign(written, written)
+       ),
+
+       TP_printk("bdi %s written %d",
+                 __entry->name,
+                 __entry->written
+       )
+)
+#endif
+
+DECLARE_EVENT_CLASS(wbc_class, /* snapshot of a writeback_control */
+       TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
+       TP_ARGS(wbc, bdi),
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(long, nr_to_write)
+               __field(long, pages_skipped)
+               __field(int, sync_mode)
+               __field(int, for_kupdate)
+               __field(int, for_background)
+               __field(int, for_reclaim)
+               __field(int, range_cyclic)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               __field(int, more_io)
+               __field(unsigned long, older_than_this) /* jiffies cutoff */
+#endif
+               __field(long, range_start)
+               __field(long, range_end)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(bdi->dev), 32)
+               tp_assign(nr_to_write, wbc->nr_to_write)
+               tp_assign(pages_skipped, wbc->pages_skipped)
+               tp_assign(sync_mode, wbc->sync_mode)
+               tp_assign(for_kupdate, wbc->for_kupdate)
+               tp_assign(for_background, wbc->for_background)
+               tp_assign(for_reclaim, wbc->for_reclaim)
+               tp_assign(range_cyclic, wbc->range_cyclic)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               tp_assign(more_io, wbc->more_io)
+               tp_assign(older_than_this, wbc->older_than_this ?
+                                               *wbc->older_than_this : 0)
+#endif
+               tp_assign(range_start, (long)wbc->range_start)
+               tp_assign(range_end, (long)wbc->range_end)
+       ),
+
+       TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
+#else
+               "bgrd=%d reclm=%d cyclic=%d "
+#endif
+               "start=0x%lx end=0x%lx",
+               __entry->name,
+               __entry->nr_to_write,
+               __entry->pages_skipped,
+               __entry->sync_mode,
+               __entry->for_kupdate,
+               __entry->for_background,
+               __entry->for_reclaim,
+               __entry->range_cyclic,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+               __entry->more_io,
+               __entry->older_than_this,
+#endif
+               __entry->range_start,
+               __entry->range_end)
+)
+
+#define DEFINE_WBC_EVENT(name) /* event from wbc_class */ \
+DEFINE_EVENT(wbc_class, name, \
+       TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
+       TP_ARGS(wbc, bdi))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
+DEFINE_WBC_EVENT(wbc_writeback_start)
+DEFINE_WBC_EVENT(wbc_writeback_written)
+DEFINE_WBC_EVENT(wbc_writeback_wait)
+DEFINE_WBC_EVENT(wbc_balance_dirty_start)
+DEFINE_WBC_EVENT(wbc_balance_dirty_written)
+DEFINE_WBC_EVENT(wbc_balance_dirty_wait)
+#endif
+DEFINE_WBC_EVENT(wbc_writepage)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+TRACE_EVENT(writeback_queue_io, /* inodes moved onto the writeback queue */
+       TP_PROTO(struct bdi_writeback *wb,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+                struct wb_writeback_work *work,
+#else
+                unsigned long *older_than_this,
+#endif
+                int moved),
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       TP_ARGS(wb, work, moved),
+#else
+       TP_ARGS(wb, older_than_this, moved),
+#endif
+       TP_STRUCT__entry(
+               __array(char,           name, 32)
+               __field(unsigned long,  older)
+               __field(long,           age)
+               __field(int,            moved)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               __field(int,            reason)
+#endif
+       ),
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(wb->bdi->dev), 32)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               tp_assign(older,
+                       work->older_than_this ? *(work->older_than_this) : 0)
+               tp_assign(age, work->older_than_this ?
+                       (jiffies - *(work->older_than_this)) * 1000 / HZ : -1)
+#else
+               tp_assign(older, older_than_this ?  *older_than_this : 0)
+               tp_assign(age, older_than_this ?
+                       (jiffies - *older_than_this) * 1000 / HZ : -1)
+#endif
+               tp_assign(moved, moved)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+               tp_assign(reason, work->reason)
+#endif
+       ),
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
+               __entry->name,
+               __entry->older, /* older_than_this in jiffies */
+               __entry->age,   /* older_than_this in relative milliseconds */
+               __entry->moved,
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
+#else
+       TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+               __entry->name,
+               __entry->older, /* older_than_this in jiffies */
+               __entry->age,   /* older_than_this in relative milliseconds */
+               __entry->moved
+       )
+#endif
+)
+
+TRACE_EVENT(global_dirty_state, /* global dirty-page accounting snapshot */
+
+       TP_PROTO(unsigned long background_thresh,
+                unsigned long dirty_thresh
+       ),
+
+       TP_ARGS(background_thresh,
+               dirty_thresh
+       ),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,  nr_dirty)       /* NR_FILE_DIRTY */
+               __field(unsigned long,  nr_writeback)   /* NR_WRITEBACK */
+               __field(unsigned long,  nr_unstable)    /* NR_UNSTABLE_NFS */
+               __field(unsigned long,  background_thresh)
+               __field(unsigned long,  dirty_thresh)
+               __field(unsigned long,  dirty_limit)
+               __field(unsigned long,  nr_dirtied)
+               __field(unsigned long,  nr_written)
+       ),
+
+       TP_fast_assign(
+               tp_assign(nr_dirty, global_page_state(NR_FILE_DIRTY))
+               tp_assign(nr_writeback, global_page_state(NR_WRITEBACK))
+               tp_assign(nr_unstable, global_page_state(NR_UNSTABLE_NFS))
+               tp_assign(nr_dirtied, global_page_state(NR_DIRTIED))
+               tp_assign(nr_written, global_page_state(NR_WRITTEN))
+               tp_assign(background_thresh, background_thresh)
+               tp_assign(dirty_thresh, dirty_thresh)
+               tp_assign(dirty_limit, global_dirty_limit)
+       ),
+
+       TP_printk("dirty=%lu writeback=%lu unstable=%lu "
+                 "bg_thresh=%lu thresh=%lu limit=%lu "
+                 "dirtied=%lu written=%lu",
+                 __entry->nr_dirty,
+                 __entry->nr_writeback,
+                 __entry->nr_unstable,
+                 __entry->background_thresh,
+                 __entry->dirty_thresh,
+                 __entry->dirty_limit,
+                 __entry->nr_dirtied,
+                 __entry->nr_written
+       )
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+
+#define KBps(x)                        ((x) << (PAGE_SHIFT - 10)) /* pages -> KB */
+
+TRACE_EVENT(bdi_dirty_ratelimit, /* per-bdi dirty ratelimit update */
+
+       TP_PROTO(struct backing_dev_info *bdi,
+                unsigned long dirty_rate,
+                unsigned long task_ratelimit),
+
+       TP_ARGS(bdi, dirty_rate, task_ratelimit),
+
+       TP_STRUCT__entry(
+               __array(char,           bdi, 32)
+               __field(unsigned long,  write_bw)
+               __field(unsigned long,  avg_write_bw)
+               __field(unsigned long,  dirty_rate)
+               __field(unsigned long,  dirty_ratelimit)
+               __field(unsigned long,  task_ratelimit)
+               __field(unsigned long,  balanced_dirty_ratelimit)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(bdi, dev_name(bdi->dev), 32)
+               tp_assign(write_bw, KBps(bdi->write_bandwidth))
+               tp_assign(avg_write_bw, KBps(bdi->avg_write_bandwidth))
+               tp_assign(dirty_rate, KBps(dirty_rate))
+               tp_assign(dirty_ratelimit, KBps(bdi->dirty_ratelimit))
+               tp_assign(task_ratelimit, KBps(task_ratelimit))
+               tp_assign(balanced_dirty_ratelimit,
+                                       KBps(bdi->balanced_dirty_ratelimit))
+       ),
+
+       TP_printk("bdi %s: "
+                 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+                 "dirty_ratelimit=%lu task_ratelimit=%lu "
+                 "balanced_dirty_ratelimit=%lu",
+                 __entry->bdi,
+                 __entry->write_bw,            /* write bandwidth */
+                 __entry->avg_write_bw,        /* avg write bandwidth */
+                 __entry->dirty_rate,          /* bdi dirty rate */
+                 __entry->dirty_ratelimit,     /* base ratelimit */
+                 __entry->task_ratelimit, /* ratelimit with position control */
+                 __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+       )
+)
+
+TRACE_EVENT(balance_dirty_pages, /* one dirty-throttling pause decision */
+
+       TP_PROTO(struct backing_dev_info *bdi,
+                unsigned long thresh,
+                unsigned long bg_thresh,
+                unsigned long dirty,
+                unsigned long bdi_thresh,
+                unsigned long bdi_dirty,
+                unsigned long dirty_ratelimit,
+                unsigned long task_ratelimit,
+                unsigned long dirtied,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+                unsigned long period,
+#endif
+                long pause,
+                unsigned long start_time),
+
+       TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+               dirty_ratelimit, task_ratelimit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               dirtied, period, pause, start_time),
+#else
+               dirtied, pause, start_time),
+#endif
+       TP_STRUCT__entry(
+               __array(         char,  bdi, 32)
+               __field(unsigned long,  limit)
+               __field(unsigned long,  setpoint)
+               __field(unsigned long,  dirty)
+               __field(unsigned long,  bdi_setpoint)
+               __field(unsigned long,  bdi_dirty)
+               __field(unsigned long,  dirty_ratelimit)
+               __field(unsigned long,  task_ratelimit)
+               __field(unsigned int,   dirtied)
+               __field(unsigned int,   dirtied_pause)
+               __field(unsigned long,  paused)
+               __field(         long,  pause)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               __field(unsigned long,  period)
+               __field(         long,  think)
+#endif
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(bdi, dev_name(bdi->dev), 32)
+               tp_assign(limit, global_dirty_limit)
+               tp_assign(setpoint, /* midpoint of limit and thresh average */
+                       (global_dirty_limit + (thresh + bg_thresh) / 2) / 2)
+               tp_assign(dirty, dirty)
+               tp_assign(bdi_setpoint, /* global setpoint scaled to this bdi */
+                       ((global_dirty_limit + (thresh + bg_thresh) / 2) / 2) *
+                       bdi_thresh / (thresh + 1))
+               tp_assign(bdi_dirty, bdi_dirty)
+               tp_assign(dirty_ratelimit, KBps(dirty_ratelimit))
+               tp_assign(task_ratelimit, KBps(task_ratelimit))
+               tp_assign(dirtied, dirtied)
+               tp_assign(dirtied_pause, current->nr_dirtied_pause)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+               tp_assign(think, current->dirty_paused_when == 0 ? 0 :
+                       (long)(jiffies - current->dirty_paused_when) * 1000/HZ)
+               tp_assign(period, period * 1000 / HZ)
+#endif
+               tp_assign(pause, pause * 1000 / HZ)
+               tp_assign(paused, (jiffies - start_time) * 1000 / HZ)
+       ),
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       TP_printk("bdi %s: "
+                 "limit=%lu setpoint=%lu dirty=%lu "
+                 "bdi_setpoint=%lu bdi_dirty=%lu "
+                 "dirty_ratelimit=%lu task_ratelimit=%lu "
+                 "dirtied=%u dirtied_pause=%u "
+                 "paused=%lu pause=%ld period=%lu think=%ld",
+                 __entry->bdi,
+                 __entry->limit,
+                 __entry->setpoint,
+                 __entry->dirty,
+                 __entry->bdi_setpoint,
+                 __entry->bdi_dirty,
+                 __entry->dirty_ratelimit,
+                 __entry->task_ratelimit,
+                 __entry->dirtied,
+                 __entry->dirtied_pause,
+                 __entry->paused,      /* ms */
+                 __entry->pause,       /* ms */
+                 __entry->period,      /* ms */
+                 __entry->think        /* ms */
+         )
+#else
+       TP_printk("bdi %s: "
+                 "limit=%lu setpoint=%lu dirty=%lu "
+                 "bdi_setpoint=%lu bdi_dirty=%lu "
+                 "dirty_ratelimit=%lu task_ratelimit=%lu "
+                 "dirtied=%u dirtied_pause=%u "
+                 "paused=%lu pause=%ld",
+                 __entry->bdi,
+                 __entry->limit,
+                 __entry->setpoint,
+                 __entry->dirty,
+                 __entry->bdi_setpoint,
+                 __entry->bdi_dirty,
+                 __entry->dirty_ratelimit,
+                 __entry->task_ratelimit,
+                 __entry->dirtied,
+                 __entry->dirtied_pause,
+                 __entry->paused,      /* ms */
+                 __entry->pause        /* ms */
+         )
+#endif
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
+TRACE_EVENT(writeback_sb_inodes_requeue, /* inode put back on dirty list */
+
+       TP_PROTO(struct inode *inode),
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(unsigned long, state)
+               __field(unsigned long, dirtied_when)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(inode_to_bdi(inode)->dev), 32)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(state, inode->i_state)
+               tp_assign(dirtied_when, inode->dirtied_when)
+       ),
+
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+                 __entry->name,
+                 __entry->ino,
+                 show_inode_state(__entry->state),
+                 __entry->dirtied_when,
+                 (jiffies - __entry->dirtied_when) / HZ /* age in seconds */
+       )
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+DECLARE_EVENT_CLASS(writeback_congest_waited_template, /* timeout vs delay, usec */
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed),
+
+       TP_STRUCT__entry(
+               __field(        unsigned int,   usec_timeout    )
+               __field(        unsigned int,   usec_delayed    )
+       ),
+
+       TP_fast_assign(
+               tp_assign(usec_timeout, usec_timeout)
+               tp_assign(usec_delayed, usec_delayed)
+       ),
+
+       TP_printk("usec_timeout=%u usec_delayed=%u",
+                       __entry->usec_timeout,
+                       __entry->usec_delayed)
+)
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed)
+)
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed)
+)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+DECLARE_EVENT_CLASS(writeback_single_inode_template, /* per-inode wb result */
+
+       TP_PROTO(struct inode *inode,
+                struct writeback_control *wbc,
+                unsigned long nr_to_write
+       ),
+
+       TP_ARGS(inode, wbc, nr_to_write),
+
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(unsigned long, state)
+               __field(unsigned long, dirtied_when)
+               __field(unsigned long, writeback_index)
+               __field(long, nr_to_write)
+               __field(unsigned long, wrote)
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(name, dev_name(inode_to_bdi(inode)->dev), 32)
+               tp_assign(ino, inode->i_ino)
+               tp_assign(state, inode->i_state)
+               tp_assign(dirtied_when, inode->dirtied_when)
+               tp_assign(writeback_index, inode->i_mapping->writeback_index)
+               tp_assign(nr_to_write, nr_to_write)
+               tp_assign(wrote, nr_to_write - wbc->nr_to_write) /* requested - remaining */
+       ),
+
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
+                 "index=%lu to_write=%ld wrote=%lu",
+                 __entry->name,
+                 __entry->ino,
+                 show_inode_state(__entry->state),
+                 __entry->dirtied_when,
+                 (jiffies - __entry->dirtied_when) / HZ,
+                 __entry->writeback_index,
+                 __entry->nr_to_write,
+                 __entry->wrote
+       )
+)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
+       TP_PROTO(struct inode *inode,
+               struct writeback_control *wbc,
+               unsigned long nr_to_write),
+       TP_ARGS(inode, wbc, nr_to_write)
+)
+#endif
+
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
+       TP_PROTO(struct inode *inode,
+                struct writeback_control *wbc,
+                unsigned long nr_to_write),
+       TP_ARGS(inode, wbc, nr_to_write)
+)
+#endif
+
+#endif /* _TRACE_WRITEBACK_H */
+
+/* This part must be outside protection */
+#include "../../../probes/define_trace.h"
diff --git a/instrumentation/events/mainline/btrfs.h b/instrumentation/events/mainline/btrfs.h
new file mode 100644 (file)
index 0000000..54fab04
--- /dev/null
@@ -0,0 +1,918 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM btrfs
+
+#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BTRFS_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
+
+struct btrfs_root;
+struct btrfs_fs_info;
+struct btrfs_inode;
+struct extent_map;
+struct btrfs_ordered_extent;
+struct btrfs_delayed_ref_node;
+struct btrfs_delayed_tree_ref;
+struct btrfs_delayed_data_ref;
+struct btrfs_delayed_ref_head;
+struct btrfs_block_group_cache;
+struct btrfs_free_cluster;
+struct map_lookup;
+struct extent_buffer;
+
+#define show_ref_type(type)                                            \
+       __print_symbolic(type,                                          \
+               { BTRFS_TREE_BLOCK_REF_KEY,     "TREE_BLOCK_REF" },     \
+               { BTRFS_EXTENT_DATA_REF_KEY,    "EXTENT_DATA_REF" },    \
+               { BTRFS_EXTENT_REF_V0_KEY,      "EXTENT_REF_V0" },      \
+               { BTRFS_SHARED_BLOCK_REF_KEY,   "SHARED_BLOCK_REF" },   \
+               { BTRFS_SHARED_DATA_REF_KEY,    "SHARED_DATA_REF" })
+
+#define __show_root_type(obj)                                          \
+       __print_symbolic_u64(obj,                                       \
+               { BTRFS_ROOT_TREE_OBJECTID,     "ROOT_TREE"     },      \
+               { BTRFS_EXTENT_TREE_OBJECTID,   "EXTENT_TREE"   },      \
+               { BTRFS_CHUNK_TREE_OBJECTID,    "CHUNK_TREE"    },      \
+               { BTRFS_DEV_TREE_OBJECTID,      "DEV_TREE"      },      \
+               { BTRFS_FS_TREE_OBJECTID,       "FS_TREE"       },      \
+               { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" },      \
+               { BTRFS_CSUM_TREE_OBJECTID,     "CSUM_TREE"     },      \
+               { BTRFS_TREE_LOG_OBJECTID,      "TREE_LOG"      },      \
+               { BTRFS_TREE_RELOC_OBJECTID,    "TREE_RELOC"    },      \
+               { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
+
+#define show_root_type(obj)                                            \
+       obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) ||                \
+             (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
+
+#define BTRFS_GROUP_FLAGS      \
+       { BTRFS_BLOCK_GROUP_DATA,       "DATA"}, \
+       { BTRFS_BLOCK_GROUP_SYSTEM,     "SYSTEM"}, \
+       { BTRFS_BLOCK_GROUP_METADATA,   "METADATA"}, \
+       { BTRFS_BLOCK_GROUP_RAID0,      "RAID0"}, \
+       { BTRFS_BLOCK_GROUP_RAID1,      "RAID1"}, \
+       { BTRFS_BLOCK_GROUP_DUP,        "DUP"}, \
+       { BTRFS_BLOCK_GROUP_RAID10,     "RAID10"}
+
+#define BTRFS_UUID_SIZE 16
+
+TRACE_EVENT(btrfs_transaction_commit,
+
+       TP_PROTO(struct btrfs_root *root),
+
+       TP_ARGS(root),
+
+       TP_STRUCT__entry(
+               __field(        u64,  generation                )
+               __field(        u64,  root_objectid             )
+       ),
+
+       TP_fast_assign(
+               __entry->generation     = root->fs_info->generation;
+               __entry->root_objectid  = root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), gen = %llu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->generation)
+);
+
+DECLARE_EVENT_CLASS(btrfs__inode,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino                     )
+               __field(        blkcnt_t,  blocks               )
+               __field(        u64,  disk_i_size               )
+               __field(        u64,  generation                )
+               __field(        u64,  last_trans                )
+               __field(        u64,  logged_trans              )
+               __field(        u64,  root_objectid             )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->blocks = inode->i_blocks;
+               __entry->disk_i_size  = BTRFS_I(inode)->disk_i_size;
+               __entry->generation = BTRFS_I(inode)->generation;
+               __entry->last_trans = BTRFS_I(inode)->last_trans;
+               __entry->logged_trans = BTRFS_I(inode)->logged_trans;
+               __entry->root_objectid =
+                               BTRFS_I(inode)->root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
+                 "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->generation,
+                 (unsigned long)__entry->ino,
+                 (unsigned long long)__entry->blocks,
+                 (unsigned long long)__entry->disk_i_size,
+                 (unsigned long long)__entry->last_trans,
+                 (unsigned long long)__entry->logged_trans)
+);
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+);
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+);
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+);
+
+#define __show_map_type(type)                                          \
+       __print_symbolic_u64(type,                                      \
+               { EXTENT_MAP_LAST_BYTE, "LAST_BYTE"     },              \
+               { EXTENT_MAP_HOLE,      "HOLE"          },              \
+               { EXTENT_MAP_INLINE,    "INLINE"        },              \
+               { EXTENT_MAP_DELALLOC,  "DELALLOC"      })
+
+#define show_map_type(type)                    \
+       type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" :  __show_map_type(type)
+
+#define show_map_flags(flag)                                           \
+       __print_flags(flag, "|",                                        \
+               { EXTENT_FLAG_PINNED,           "PINNED"        },      \
+               { EXTENT_FLAG_COMPRESSED,       "COMPRESSED"    },      \
+               { EXTENT_FLAG_VACANCY,          "VACANCY"       },      \
+               { EXTENT_FLAG_PREALLOC,         "PREALLOC"      })
+
+TRACE_EVENT(btrfs_get_extent,
+
+       TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+
+       TP_ARGS(root, map),
+
+       TP_STRUCT__entry(
+               __field(        u64,  root_objectid     )
+               __field(        u64,  start             )
+               __field(        u64,  len               )
+               __field(        u64,  orig_start        )
+               __field(        u64,  block_start       )
+               __field(        u64,  block_len         )
+               __field(        unsigned long,  flags   )
+               __field(        int,  refs              )
+               __field(        unsigned int,  compress_type    )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->start          = map->start;
+               __entry->len            = map->len;
+               __entry->orig_start     = map->orig_start;
+               __entry->block_start    = map->block_start;
+               __entry->block_len      = map->block_len;
+               __entry->flags          = map->flags;
+               __entry->refs           = atomic_read(&map->refs);
+               __entry->compress_type  = map->compress_type;
+       ),
+
+       TP_printk("root = %llu(%s), start = %llu, len = %llu, "
+                 "orig_start = %llu, block_start = %llu(%s), "
+                 "block_len = %llu, flags = %s, refs = %u, "
+                 "compress_type = %u",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->len,
+                 (unsigned long long)__entry->orig_start,
+                 show_map_type(__entry->block_start),
+                 (unsigned long long)__entry->block_len,
+                 show_map_flags(__entry->flags),
+                 __entry->refs, __entry->compress_type)
+);
+
+#define show_ordered_flags(flags)                                      \
+       __print_symbolic(flags,                                 \
+               { BTRFS_ORDERED_IO_DONE,        "IO_DONE"       },      \
+               { BTRFS_ORDERED_COMPLETE,       "COMPLETE"      },      \
+               { BTRFS_ORDERED_NOCOW,          "NOCOW"         },      \
+               { BTRFS_ORDERED_COMPRESSED,     "COMPRESSED"    },      \
+               { BTRFS_ORDERED_PREALLOC,       "PREALLOC"      },      \
+               { BTRFS_ORDERED_DIRECT,         "DIRECT"        })
+
+DECLARE_EVENT_CLASS(btrfs__ordered_extent,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino             )
+               __field(        u64,  file_offset       )
+               __field(        u64,  start             )
+               __field(        u64,  len               )
+               __field(        u64,  disk_len          )
+               __field(        u64,  bytes_left        )
+               __field(        unsigned long,  flags   )
+               __field(        int,  compress_type     )
+               __field(        int,  refs              )
+               __field(        u64,  root_objectid     )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->file_offset    = ordered->file_offset;
+               __entry->start          = ordered->start;
+               __entry->len            = ordered->len;
+               __entry->disk_len       = ordered->disk_len;
+               __entry->bytes_left     = ordered->bytes_left;
+               __entry->flags          = ordered->flags;
+               __entry->compress_type  = ordered->compress_type;
+               __entry->refs           = atomic_read(&ordered->refs);
+               __entry->root_objectid  =
+                               BTRFS_I(inode)->root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
+                 "start = %llu, len = %llu, disk_len = %llu, "
+                 "bytes_left = %llu, flags = %s, compress_type = %d, "
+                 "refs = %d",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->ino,
+                 (unsigned long long)__entry->file_offset,
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->len,
+                 (unsigned long long)__entry->disk_len,
+                 (unsigned long long)__entry->bytes_left,
+                 show_ordered_flags(__entry->flags),
+                 __entry->compress_type, __entry->refs)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
+
+       TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+       TP_ARGS(inode, ordered)
+);
+
+DECLARE_EVENT_CLASS(btrfs__writepage,
+
+       TP_PROTO(struct page *page, struct inode *inode,
+                struct writeback_control *wbc),
+
+       TP_ARGS(page, inode, wbc),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t,  index                 )
+               __field(        long,   nr_to_write             )
+               __field(        long,   pages_skipped           )
+               __field(        loff_t, range_start             )
+               __field(        loff_t, range_end               )
+               __field(        char,   for_kupdate             )
+               __field(        char,   for_reclaim             )
+               __field(        char,   range_cyclic            )
+               __field(        pgoff_t,  writeback_index       )
+               __field(        u64,    root_objectid           )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->index          = page->index;
+               __entry->nr_to_write    = wbc->nr_to_write;
+               __entry->pages_skipped  = wbc->pages_skipped;
+               __entry->range_start    = wbc->range_start;
+               __entry->range_end      = wbc->range_end;
+               __entry->for_kupdate    = wbc->for_kupdate;
+               __entry->for_reclaim    = wbc->for_reclaim;
+               __entry->range_cyclic   = wbc->range_cyclic;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
+               __entry->root_objectid  =
+                                BTRFS_I(inode)->root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
+                 "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
+                 "range_end = %llu, for_kupdate = %d, "
+                 "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, __entry->index,
+                 __entry->nr_to_write, __entry->pages_skipped,
+                 __entry->range_start, __entry->range_end,
+                 __entry->for_kupdate,
+                 __entry->for_reclaim, __entry->range_cyclic,
+                 (unsigned long)__entry->writeback_index)
+);
+
+DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+
+       TP_PROTO(struct page *page, struct inode *inode,
+                struct writeback_control *wbc),
+
+       TP_ARGS(page, inode, wbc)
+);
+
+TRACE_EVENT(btrfs_writepage_end_io_hook,
+
+       TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
+
+       TP_ARGS(page, start, end, uptodate),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,   ino            )
+               __field(        pgoff_t, index          )
+               __field(        u64,     start          )
+               __field(        u64,     end            )
+               __field(        int,     uptodate       )
+               __field(        u64,    root_objectid   )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = page->mapping->host->i_ino;
+               __entry->index  = page->index;
+               __entry->start  = start;
+               __entry->end    = end;
+               __entry->uptodate = uptodate;
+               __entry->root_objectid  =
+                        BTRFS_I(page->mapping->host)->root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
+                 "end = %llu, uptodate = %d",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, (unsigned long)__entry->index,
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->end, __entry->uptodate)
+);
+
+TRACE_EVENT(btrfs_sync_file,
+
+       TP_PROTO(struct file *file, int datasync),
+
+       TP_ARGS(file, datasync),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,  ino             )
+               __field(        ino_t,  parent          )
+               __field(        int,    datasync        )
+               __field(        u64,    root_objectid   )
+       ),
+
+       TP_fast_assign(
+               struct dentry *dentry = file->f_path.dentry;
+               struct inode *inode = dentry->d_inode;
+
+               __entry->ino            = inode->i_ino;
+               __entry->parent         = dentry->d_parent->d_inode->i_ino;
+               __entry->datasync       = datasync;
+               __entry->root_objectid  =
+                                BTRFS_I(inode)->root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long)__entry->ino, (unsigned long)__entry->parent,
+                 __entry->datasync)
+);
+
+TRACE_EVENT(btrfs_sync_fs,
+
+       TP_PROTO(int wait),
+
+       TP_ARGS(wait),
+
+       TP_STRUCT__entry(
+               __field(        int,  wait              )
+       ),
+
+       TP_fast_assign(
+               __entry->wait   = wait;
+       ),
+
+       TP_printk("wait = %d", __entry->wait)
+);
+
+#define show_ref_action(action)                                                \
+       __print_symbolic(action,                                        \
+               { BTRFS_ADD_DELAYED_REF,    "ADD_DELAYED_REF" },        \
+               { BTRFS_DROP_DELAYED_REF,   "DROP_DELAYED_REF" },       \
+               { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" },     \
+               { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
+                       
+
+TRACE_EVENT(btrfs_delayed_tree_ref,
+
+       TP_PROTO(struct btrfs_delayed_ref_node *ref,
+                struct btrfs_delayed_tree_ref *full_ref,
+                int action),
+
+       TP_ARGS(ref, full_ref, action),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+               __field(        int,  action            ) 
+               __field(        u64,  parent            )
+               __field(        u64,  ref_root          )
+               __field(        int,  level             )
+               __field(        int,  type              )
+               __field(        u64,  seq               )
+       ),
+
+       TP_fast_assign(
+               __entry->bytenr         = ref->bytenr;
+               __entry->num_bytes      = ref->num_bytes;
+               __entry->action         = action;
+               __entry->parent         = full_ref->parent;
+               __entry->ref_root       = full_ref->root;
+               __entry->level          = full_ref->level;
+               __entry->type           = ref->type;
+               __entry->seq            = ref->seq;
+       ),
+
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+                 "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
+                 "type = %s, seq = %llu",
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes,
+                 show_ref_action(__entry->action),
+                 show_root_type(__entry->parent),
+                 show_root_type(__entry->ref_root),
+                 __entry->level, show_ref_type(__entry->type),
+                 (unsigned long long)__entry->seq)
+);
+
+TRACE_EVENT(btrfs_delayed_data_ref,
+
+       TP_PROTO(struct btrfs_delayed_ref_node *ref,
+                struct btrfs_delayed_data_ref *full_ref,
+                int action),
+
+       TP_ARGS(ref, full_ref, action),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+               __field(        int,  action            ) 
+               __field(        u64,  parent            )
+               __field(        u64,  ref_root          )
+               __field(        u64,  owner             )
+               __field(        u64,  offset            )
+               __field(        int,  type              )
+               __field(        u64,  seq               )
+       ),
+
+       TP_fast_assign(
+               __entry->bytenr         = ref->bytenr;
+               __entry->num_bytes      = ref->num_bytes;
+               __entry->action         = action;
+               __entry->parent         = full_ref->parent;
+               __entry->ref_root       = full_ref->root;
+               __entry->owner          = full_ref->objectid;
+               __entry->offset         = full_ref->offset;
+               __entry->type           = ref->type;
+               __entry->seq            = ref->seq;
+       ),
+
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+                 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
+                 "offset = %llu, type = %s, seq = %llu",
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes,
+                 show_ref_action(__entry->action),
+                 show_root_type(__entry->parent),
+                 show_root_type(__entry->ref_root),
+                 (unsigned long long)__entry->owner,
+                 (unsigned long long)__entry->offset,
+                 show_ref_type(__entry->type),
+                 (unsigned long long)__entry->seq)
+);
+
+TRACE_EVENT(btrfs_delayed_ref_head,
+
+       TP_PROTO(struct btrfs_delayed_ref_node *ref,
+                struct btrfs_delayed_ref_head *head_ref,
+                int action),
+
+       TP_ARGS(ref, head_ref, action),
+
+       TP_STRUCT__entry(
+               __field(        u64,  bytenr            )
+               __field(        u64,  num_bytes         )
+               __field(        int,  action            ) 
+               __field(        int,  is_data           )
+       ),
+
+       TP_fast_assign(
+               __entry->bytenr         = ref->bytenr;
+               __entry->num_bytes      = ref->num_bytes;
+               __entry->action         = action;
+               __entry->is_data        = head_ref->is_data;
+       ),
+
+       TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+                 (unsigned long long)__entry->bytenr,
+                 (unsigned long long)__entry->num_bytes,
+                 show_ref_action(__entry->action),
+                 __entry->is_data)
+);
+
+#define show_chunk_type(type)                                  \
+       __print_flags(type, "|",                                \
+               { BTRFS_BLOCK_GROUP_DATA,       "DATA"  },      \
+               { BTRFS_BLOCK_GROUP_SYSTEM,     "SYSTEM"},      \
+               { BTRFS_BLOCK_GROUP_METADATA,   "METADATA"},    \
+               { BTRFS_BLOCK_GROUP_RAID0,      "RAID0" },      \
+               { BTRFS_BLOCK_GROUP_RAID1,      "RAID1" },      \
+               { BTRFS_BLOCK_GROUP_DUP,        "DUP"   },      \
+               { BTRFS_BLOCK_GROUP_RAID10,     "RAID10"})
+
+DECLARE_EVENT_CLASS(btrfs__chunk,
+
+       TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+                u64 offset, u64 size),
+
+       TP_ARGS(root, map, offset, size),
+
+       TP_STRUCT__entry(
+               __field(        int,  num_stripes               )
+               __field(        u64,  type                      )
+               __field(        int,  sub_stripes               )
+               __field(        u64,  offset                    )
+               __field(        u64,  size                      )
+               __field(        u64,  root_objectid             )
+       ),
+
+       TP_fast_assign(
+               __entry->num_stripes    = map->num_stripes;
+               __entry->type           = map->type;
+               __entry->sub_stripes    = map->sub_stripes;
+               __entry->offset         = offset;
+               __entry->size           = size;
+               __entry->root_objectid  = root->root_key.objectid;
+       ),
+
+       TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
+                 "num_stripes = %d, sub_stripes = %d, type = %s",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->offset,
+                 (unsigned long long)__entry->size,
+                 __entry->num_stripes, __entry->sub_stripes,
+                 show_chunk_type(__entry->type))
+);
+
+DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_alloc,
+
+       TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+                u64 offset, u64 size),
+
+       TP_ARGS(root, map, offset, size)
+);
+
+DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_free,
+
+       TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+                u64 offset, u64 size),
+
+       TP_ARGS(root, map, offset, size)
+);
+
+TRACE_EVENT(btrfs_cow_block,
+
+       TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
+                struct extent_buffer *cow),
+
+       TP_ARGS(root, buf, cow),
+
+       TP_STRUCT__entry(
+               __field(        u64,  root_objectid             )
+               __field(        u64,  buf_start                 )
+               __field(        int,  refs                      )
+               __field(        u64,  cow_start                 )
+               __field(        int,  buf_level                 )
+               __field(        int,  cow_level                 )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->buf_start      = buf->start;
+               __entry->refs           = atomic_read(&buf->refs);
+               __entry->cow_start      = cow->start;
+               __entry->buf_level      = btrfs_header_level(buf);
+               __entry->cow_level      = btrfs_header_level(cow);
+       ),
+
+       TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
+                 "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+                 show_root_type(__entry->root_objectid),
+                 __entry->refs,
+                 (unsigned long long)__entry->buf_start,
+                 __entry->buf_level,
+                 (unsigned long long)__entry->cow_start,
+                 __entry->cow_level)
+);
+
+TRACE_EVENT(btrfs_space_reservation,
+
+       TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
+                u64 bytes, int reserve),
+
+       TP_ARGS(fs_info, type, val, bytes, reserve),
+
+       TP_STRUCT__entry(
+               __array(        u8,     fsid,   BTRFS_UUID_SIZE )
+               __string(       type,   type                    )
+               __field(        u64,    val                     )
+               __field(        u64,    bytes                   )
+               __field(        int,    reserve                 )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+               __assign_str(type, type);
+               __entry->val            = val;
+               __entry->bytes          = bytes;
+               __entry->reserve        = reserve;
+       ),
+
+       TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
+                 __entry->val, __entry->reserve ? "reserve" : "release",
+                 __entry->bytes)
+);
+
+DECLARE_EVENT_CLASS(btrfs__reserved_extent,
+
+       TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+       TP_ARGS(root, start, len),
+
+       TP_STRUCT__entry(
+               __field(        u64,  root_objectid             )
+               __field(        u64,  start                     )
+               __field(        u64,  len                       )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->start          = start;
+               __entry->len            = len;
+       ),
+
+       TP_printk("root = %llu(%s), start = %llu, len = %llu",
+                 show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->start,
+                 (unsigned long long)__entry->len)
+);
+
+DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_alloc,
+
+       TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+       TP_ARGS(root, start, len)
+);
+
+DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free,
+
+       TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+       TP_ARGS(root, start, len)
+);
+
+TRACE_EVENT(find_free_extent,
+
+       TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+                u64 data),
+
+       TP_ARGS(root, num_bytes, empty_size, data),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    num_bytes               )
+               __field(        u64,    empty_size              )
+               __field(        u64,    data                    )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->num_bytes      = num_bytes;
+               __entry->empty_size     = empty_size;
+               __entry->data           = data;
+       ),
+
+       TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
+                 "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+                 __entry->num_bytes, __entry->empty_size, __entry->data,
+                 __print_flags((unsigned long)__entry->data, "|",
+                                BTRFS_GROUP_FLAGS))
+);
+
+DECLARE_EVENT_CLASS(btrfs__reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = start;
+               __entry->len            = len;
+       ),
+
+       TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
+                 "start = %Lu, len = %Lu",
+                 show_root_type(__entry->root_objectid), __entry->bg_objectid,
+                 __entry->flags, __print_flags((unsigned long)__entry->flags,
+                                               "|", BTRFS_GROUP_FLAGS),
+                 __entry->start, __entry->len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+);
+
+TRACE_EVENT(btrfs_find_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
+                u64 bytes, u64 empty_size, u64 min_bytes),
+
+       TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    bytes                   )
+               __field(        u64,    empty_size              )
+               __field(        u64,    min_bytes               )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = start;
+               __entry->bytes          = bytes;
+               __entry->empty_size     = empty_size;
+               __entry->min_bytes      = min_bytes;
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
+                 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->bytes, __entry->empty_size,  __entry->min_bytes)
+);
+
+TRACE_EVENT(btrfs_failed_cluster_setup,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group),
+
+       TP_ARGS(block_group),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+       ),
+
+       TP_printk("block_group = %Lu", __entry->bg_objectid)
+);
+
+TRACE_EVENT(btrfs_setup_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group,
+                struct btrfs_free_cluster *cluster, u64 size, int bitmap),
+
+       TP_ARGS(block_group, cluster, size, bitmap),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    max_size                )
+               __field(        u64,    size                    )
+               __field(        int,    bitmap                  )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = cluster->window_start;
+               __entry->max_size       = cluster->max_size;
+               __entry->size           = size;
+               __entry->bitmap         = bitmap;
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
+                 "size = %Lu, max_size = %Lu, bitmap = %d",
+                 __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->size, __entry->max_size, __entry->bitmap)
+);
+
+struct extent_state;
+TRACE_EVENT(alloc_extent_state,
+
+       TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
+
+       TP_ARGS(state, mask, IP),
+
+       TP_STRUCT__entry(
+               __field(struct extent_state *, state)
+               __field(gfp_t, mask)
+               __field(unsigned long, ip)
+       ),
+
+       TP_fast_assign(
+               __entry->state  = state,
+               __entry->mask   = mask,
+               __entry->ip     = IP
+       ),
+
+       TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+                 show_gfp_flags(__entry->mask), (void *)__entry->ip)
+);
+
+TRACE_EVENT(free_extent_state,
+
+       TP_PROTO(struct extent_state *state, unsigned long IP),
+
+       TP_ARGS(state, IP),
+
+       TP_STRUCT__entry(
+               __field(struct extent_state *, state)
+               __field(unsigned long, ip)
+       ),
+
+       TP_fast_assign(
+               __entry->state  = state,
+               __entry->ip = IP
+       ),
+
+       TP_printk(" state=%p; caller = %pF", __entry->state,
+                 (void *)__entry->ip)
+);
+
+#endif /* _TRACE_BTRFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/compaction.h b/instrumentation/events/mainline/compaction.h
new file mode 100644 (file)
index 0000000..fde1b3e
--- /dev/null
@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compaction
+
+#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COMPACTION_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
+
+DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
+
+       TP_PROTO(unsigned long nr_scanned,
+               unsigned long nr_taken),
+
+       TP_ARGS(nr_scanned, nr_taken),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, nr_scanned)
+               __field(unsigned long, nr_taken)
+       ),
+
+       TP_fast_assign(
+               __entry->nr_scanned = nr_scanned;
+               __entry->nr_taken = nr_taken;
+       ),
+
+       TP_printk("nr_scanned=%lu nr_taken=%lu",
+               __entry->nr_scanned,
+               __entry->nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
+
+       TP_PROTO(unsigned long nr_scanned,
+               unsigned long nr_taken),
+
+       TP_ARGS(nr_scanned, nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+       TP_PROTO(unsigned long nr_scanned,
+               unsigned long nr_taken),
+
+       TP_ARGS(nr_scanned, nr_taken)
+);
+
+TRACE_EVENT(mm_compaction_migratepages,
+
+       TP_PROTO(unsigned long nr_migrated,
+               unsigned long nr_failed),
+
+       TP_ARGS(nr_migrated, nr_failed),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, nr_migrated)
+               __field(unsigned long, nr_failed)
+       ),
+
+       TP_fast_assign(
+               __entry->nr_migrated = nr_migrated;
+               __entry->nr_failed = nr_failed;
+       ),
+
+       TP_printk("nr_migrated=%lu nr_failed=%lu",
+               __entry->nr_migrated,
+               __entry->nr_failed)
+);
+
+
+#endif /* _TRACE_COMPACTION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/ext4.h b/instrumentation/events/mainline/ext4.h
new file mode 100644 (file)
index 0000000..d49b285
--- /dev/null
@@ -0,0 +1,2061 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ext4
+
+#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXT4_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+struct ext4_allocation_context;
+struct ext4_allocation_request;
+struct ext4_extent;
+struct ext4_prealloc_space;
+struct ext4_inode_info;
+struct mpage_da_data;
+struct ext4_map_blocks;
+struct ext4_extent;
+
+#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
+
+TRACE_EVENT(ext4_free_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        uid_t,  uid                     )
+               __field(        gid_t,  gid                     )
+               __field(        __u64, blocks                   )
+               __field(        __u16, mode                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->uid    = i_uid_read(inode);
+               __entry->gid    = i_gid_read(inode);
+               __entry->blocks = inode->i_blocks;
+               __entry->mode   = inode->i_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->mode,
+                 __entry->uid, __entry->gid, __entry->blocks)
+);
+
+TRACE_EVENT(ext4_request_inode,
+       TP_PROTO(struct inode *dir, int mode),
+
+       TP_ARGS(dir, mode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  dir                     )
+               __field(        __u16, mode                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = dir->i_sb->s_dev;
+               __entry->dir    = dir->i_ino;
+               __entry->mode   = mode;
+       ),
+
+       TP_printk("dev %d,%d dir %lu mode 0%o",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->dir, __entry->mode)
+);
+
+TRACE_EVENT(ext4_allocate_inode,
+       TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+       TP_ARGS(inode, dir, mode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        ino_t,  dir                     )
+               __field(        __u16,  mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->dir    = dir->i_ino;
+               __entry->mode   = mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->dir, __entry->mode)
+);
+
+TRACE_EVENT(ext4_evict_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    nlink                   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->nlink  = inode->i_nlink;
+       ),
+
+       TP_printk("dev %d,%d ino %lu nlink %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->nlink)
+);
+
+TRACE_EVENT(ext4_drop_inode,
+       TP_PROTO(struct inode *inode, int drop),
+
+       TP_ARGS(inode, drop),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    drop                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->drop   = drop;
+       ),
+
+       TP_printk("dev %d,%d ino %lu drop %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->drop)
+);
+
+TRACE_EVENT(ext4_mark_inode_dirty,
+       TP_PROTO(struct inode *inode, unsigned long IP),
+
+       TP_ARGS(inode, IP),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(unsigned long,  ip                      )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->ip     = IP;
+       ),
+
+       TP_printk("dev %d,%d ino %lu caller %pF",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_begin_ordered_truncate,
+       TP_PROTO(struct inode *inode, loff_t new_size),
+
+       TP_ARGS(inode, new_size),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, new_size                )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->new_size       = new_size;
+       ),
+
+       TP_printk("dev %d,%d ino %lu new_size %lld",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->new_size)
+);
+
+DECLARE_EVENT_CLASS(ext4__write_begin,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned int, len               )
+               __field(        unsigned int, flags             )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = pos;
+               __entry->len    = len;
+               __entry->flags  = flags;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len, __entry->flags)
+);
+
+DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags)
+);
+
+DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags)
+);
+
+DECLARE_EVENT_CLASS(ext4__write_end,
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                       unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned int, len               )
+               __field(        unsigned int, copied            )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = pos;
+               __entry->len    = len;
+               __entry->copied = copied;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len, __entry->copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied)
+);
+
+TRACE_EVENT(ext4_da_writepages,
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+       TP_ARGS(inode, wbc),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        long,   nr_to_write             )
+               __field(        long,   pages_skipped           )
+               __field(        loff_t, range_start             )
+               __field(        loff_t, range_end               )
+               __field(       pgoff_t, writeback_index         )
+               __field(        int,    sync_mode               )
+               __field(        char,   for_kupdate             )
+               __field(        char,   range_cyclic            )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->nr_to_write    = wbc->nr_to_write;
+               __entry->pages_skipped  = wbc->pages_skipped;
+               __entry->range_start    = wbc->range_start;
+               __entry->range_end      = wbc->range_end;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
+               __entry->sync_mode      = wbc->sync_mode;
+               __entry->for_kupdate    = wbc->for_kupdate;
+               __entry->range_cyclic   = wbc->range_cyclic;
+       ),
+
+       TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+                 "range_start %lld range_end %lld sync_mode %d "
+                 "for_kupdate %d range_cyclic %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->nr_to_write,
+                 __entry->pages_skipped, __entry->range_start,
+                 __entry->range_end, __entry->sync_mode,
+                 __entry->for_kupdate, __entry->range_cyclic,
+                 (unsigned long) __entry->writeback_index)
+);
+
+TRACE_EVENT(ext4_da_write_pages,
+       TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+
+       TP_ARGS(inode, mpd),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  b_blocknr               )
+               __field(        __u32,  b_size                  )
+               __field(        __u32,  b_state                 )
+               __field(        unsigned long,  first_page      )
+               __field(        int,    io_done                 )
+               __field(        int,    pages_written           )
+               __field(        int,    sync_mode               )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->b_blocknr      = mpd->b_blocknr;
+               __entry->b_size         = mpd->b_size;
+               __entry->b_state        = mpd->b_state;
+               __entry->first_page     = mpd->first_page;
+               __entry->io_done        = mpd->io_done;
+               __entry->pages_written  = mpd->pages_written;
+               __entry->sync_mode      = mpd->wbc->sync_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
+                 "first_page %lu io_done %d pages_written %d sync_mode %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->b_blocknr, __entry->b_size,
+                 __entry->b_state, __entry->first_page,
+                 __entry->io_done, __entry->pages_written,
+                 __entry->sync_mode
+                  )
+);
+
+TRACE_EVENT(ext4_da_writepages_result,
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc,
+                       int ret, int pages_written),
+
+       TP_ARGS(inode, wbc, ret, pages_written),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    ret                     )
+               __field(        int,    pages_written           )
+               __field(        long,   pages_skipped           )
+               __field(       pgoff_t, writeback_index         )
+               __field(        int,    sync_mode               )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->ret            = ret;
+               __entry->pages_written  = pages_written;
+               __entry->pages_skipped  = wbc->pages_skipped;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
+               __entry->sync_mode      = wbc->sync_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+                 "sync_mode %d writeback_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->ret,
+                 __entry->pages_written, __entry->pages_skipped,
+                 __entry->sync_mode,
+                 (unsigned long) __entry->writeback_index)
+);
+
+DECLARE_EVENT_CLASS(ext4__page_op,
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t, index                  )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = page->mapping->host->i_sb->s_dev;
+               __entry->ino    = page->mapping->host->i_ino;
+               __entry->index  = page->index;
+       ),
+
+       TP_printk("dev %d,%d ino %lu page_index %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->index)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_writepage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_readpage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_releasepage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
+);
+
+TRACE_EVENT(ext4_invalidatepage,
+       TP_PROTO(struct page *page, unsigned long offset),
+
+       TP_ARGS(page, offset),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        pgoff_t, index                  )
+               __field(        unsigned long, offset           )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = page->mapping->host->i_sb->s_dev;
+               __entry->ino    = page->mapping->host->i_ino;
+               __entry->index  = page->index;
+               __entry->offset = offset;
+       ),
+
+       TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->index, __entry->offset)
+);
+
+TRACE_EVENT(ext4_discard_blocks,
+       TP_PROTO(struct super_block *sb, unsigned long long blk,
+                       unsigned long long count),
+
+       TP_ARGS(sb, blk, count),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u64,  blk                     )
+               __field(        __u64,  count                   )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->blk    = blk;
+               __entry->count  = count;
+       ),
+
+       TP_printk("dev %d,%d blk %llu count %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->blk, __entry->count)
+);
+
+DECLARE_EVENT_CLASS(ext4__mb_new_pa,
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  pa_pstart               )
+               __field(        __u64,  pa_lstart               )
+               __field(        __u32,  pa_len                  )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->pa_pstart      = pa->pa_pstart;
+               __entry->pa_lstart      = pa->pa_lstart;
+               __entry->pa_len         = pa->pa_len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
+);
+
+DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
+
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa)
+);
+
+DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
+
+       TP_PROTO(struct ext4_allocation_context *ac,
+                struct ext4_prealloc_space *pa),
+
+       TP_ARGS(ac, pa)
+);
+
+TRACE_EVENT(ext4_mb_release_inode_pa,
+       TP_PROTO(struct ext4_prealloc_space *pa,
+                unsigned long long block, unsigned int count),
+
+       TP_ARGS(pa, block, count),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        __u32,  count                   )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = pa->pa_inode->i_sb->s_dev;
+               __entry->ino            = pa->pa_inode->i_ino;
+               __entry->block          = block;
+               __entry->count          = count;
+       ),
+
+       TP_printk("dev %d,%d ino %lu block %llu count %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->block, __entry->count)
+);
+
+TRACE_EVENT(ext4_mb_release_group_pa,
+       TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),
+
+       TP_ARGS(sb, pa),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u64,  pa_pstart               )
+               __field(        __u32,  pa_len                  )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = sb->s_dev;
+               __entry->pa_pstart      = pa->pa_pstart;
+               __entry->pa_len         = pa->pa_len;
+       ),
+
+       TP_printk("dev %d,%d pstart %llu len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->pa_pstart, __entry->pa_len)
+);
+
+TRACE_EVENT(ext4_discard_preallocations,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+       ),
+
+       TP_printk("dev %d,%d ino %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino)
+);
+
+TRACE_EVENT(ext4_mb_discard_preallocations,
+       TP_PROTO(struct super_block *sb, int needed),
+
+       TP_ARGS(sb, needed),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        int,    needed                  )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->needed = needed;
+       ),
+
+       TP_printk("dev %d,%d needed %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->needed)
+);
+
+TRACE_EVENT(ext4_request_blocks,
+       TP_PROTO(struct ext4_allocation_request *ar),
+
+       TP_ARGS(ar),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        unsigned int, len               )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
+               __field(        __u64,  goal                    )
+               __field(        __u64,  pleft                   )
+               __field(        __u64,  pright                  )
+               __field(        unsigned int, flags             )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = ar->inode->i_sb->s_dev;
+               __entry->ino    = ar->inode->i_ino;
+               __entry->len    = ar->len;
+               __entry->logical = ar->logical;
+               __entry->goal   = ar->goal;
+               __entry->lleft  = ar->lleft;
+               __entry->lright = ar->lright;
+               __entry->pleft  = ar->pleft;
+               __entry->pright = ar->pright;
+               __entry->flags  = ar->flags;
+       ),
+
+       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+                 "lleft %u lright %u pleft %llu pright %llu ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->logical, __entry->goal,
+                 __entry->lleft, __entry->lright, __entry->pleft,
+                 __entry->pright)
+);
+
+/*
+ * Records the result of an ext4 block allocation: the allocated physical
+ * block together with the request geometry copied from the
+ * ext4_allocation_request (goal, logical offset, logical/physical left and
+ * right neighbours, requested length) and the allocation flags.
+ */
+TRACE_EVENT(ext4_allocate_blocks,
+       TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
+
+       TP_ARGS(ar, block),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        unsigned int, len               )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
+               __field(        __u64,  goal                    )
+               __field(        __u64,  pleft                   )
+               __field(        __u64,  pright                  )
+               __field(        unsigned int, flags             )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = ar->inode->i_sb->s_dev;
+               __entry->ino    = ar->inode->i_ino;
+               __entry->block  = block;
+               __entry->len    = ar->len;
+               __entry->logical = ar->logical;
+               __entry->goal   = ar->goal;
+               __entry->lleft  = ar->lleft;
+               __entry->lright = ar->lright;
+               __entry->pleft  = ar->pleft;
+               __entry->pright = ar->pright;
+               __entry->flags  = ar->flags;
+       ),
+
+       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+                 "goal %llu lleft %u lright %u pleft %llu pright %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->block, __entry->logical,
+                 __entry->goal,  __entry->lleft, __entry->lright,
+                 __entry->pleft, __entry->pright)
+);
+
+/*
+ * Records a range of blocks being freed: device/inode, starting physical
+ * block, block count, the free-call flags, and the inode's mode (printed
+ * in octal).
+ */
+TRACE_EVENT(ext4_free_blocks,
+       TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
+                int flags),
+
+       TP_ARGS(inode, block, count, flags),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        unsigned long,  count           )
+               __field(        int,    flags                   )
+               __field(        __u16,  mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->block          = block;
+               __entry->count          = count;
+               __entry->flags          = flags;
+               __entry->mode           = inode->i_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->block, __entry->count,
+                 __entry->flags)
+);
+
+/*
+ * Entry of an fsync/fdatasync on an ext4 file.  Captures the inode, the
+ * parent directory's inode (taken from the dentry's d_parent), and the
+ * datasync flag distinguishing fdatasync from fsync.
+ */
+TRACE_EVENT(ext4_sync_file_enter,
+       TP_PROTO(struct file *file, int datasync),
+
+       TP_ARGS(file, datasync),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        ino_t,  parent                  )
+               __field(        int,    datasync                )
+       ),
+
+       TP_fast_assign(
+               struct dentry *dentry = file->f_path.dentry;
+
+               __entry->dev            = dentry->d_inode->i_sb->s_dev;
+               __entry->ino            = dentry->d_inode->i_ino;
+               __entry->datasync       = datasync;
+               __entry->parent         = dentry->d_parent->d_inode->i_ino;
+       ),
+
+       TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long) __entry->parent, __entry->datasync)
+);
+
+/* Exit of an fsync/fdatasync: inode identity plus the operation's return code. */
+TRACE_EVENT(ext4_sync_file_exit,
+       TP_PROTO(struct inode *inode, int ret),
+
+       TP_ARGS(inode, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->ret            = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->ret)
+);
+
+/* Filesystem-wide sync: records the device and whether the sync waits for completion. */
+TRACE_EVENT(ext4_sync_fs,
+       TP_PROTO(struct super_block *sb, int wait),
+
+       TP_ARGS(sb, wait),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        int,    wait                    )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->wait   = wait;
+       ),
+
+       TP_printk("dev %d,%d wait %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->wait)
+);
+
+/*
+ * Snapshots the delayed-allocation reservation counters
+ * (i_reserved_data_blocks / i_reserved_meta_blocks) of an inode's
+ * ext4-specific state.
+ */
+TRACE_EVENT(ext4_alloc_da_blocks,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field( unsigned int,  data_blocks     )
+               __field( unsigned int,  meta_blocks     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+               __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+       ),
+
+       TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->data_blocks, __entry->meta_blocks)
+);
+
+/*
+ * Multi-block allocator result.  Captures three extents from the
+ * allocation context -- original request (ac_o_ex), normalized goal
+ * (ac_g_ex) and final result (ac_f_ex), each printed as
+ * group/start/len@logical -- plus scan statistics: extents found, groups
+ * scanned, criteria pass (cr), flags, tail length, and the buddy order.
+ */
+TRACE_EVENT(ext4_mballoc_alloc,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  orig_logical            )
+               __field(          int,  orig_start              )
+               __field(        __u32,  orig_group              )
+               __field(          int,  orig_len                )
+               __field(        __u32,  goal_logical            )
+               __field(          int,  goal_start              )
+               __field(        __u32,  goal_group              )
+               __field(          int,  goal_len                )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+               __field(        __u16,  found                   )
+               __field(        __u16,  groups                  )
+               __field(        __u16,  buddy                   )
+               __field(        __u16,  flags                   )
+               __field(        __u16,  tail                    )
+               __field(        __u8,   cr                      )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->orig_logical   = ac->ac_o_ex.fe_logical;
+               __entry->orig_start     = ac->ac_o_ex.fe_start;
+               __entry->orig_group     = ac->ac_o_ex.fe_group;
+               __entry->orig_len       = ac->ac_o_ex.fe_len;
+               __entry->goal_logical   = ac->ac_g_ex.fe_logical;
+               __entry->goal_start     = ac->ac_g_ex.fe_start;
+               __entry->goal_group     = ac->ac_g_ex.fe_group;
+               __entry->goal_len       = ac->ac_g_ex.fe_len;
+               __entry->result_logical = ac->ac_f_ex.fe_logical;
+               __entry->result_start   = ac->ac_f_ex.fe_start;
+               __entry->result_group   = ac->ac_f_ex.fe_group;
+               __entry->result_len     = ac->ac_f_ex.fe_len;
+               __entry->found          = ac->ac_found;
+               __entry->flags          = ac->ac_flags;
+               __entry->groups         = ac->ac_groups_scanned;
+               __entry->buddy          = ac->ac_buddy;
+               __entry->tail           = ac->ac_tail;
+               __entry->cr             = ac->ac_criteria;
+       ),
+
+       TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+                 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
+                 "tail %u broken %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->orig_group, __entry->orig_start,
+                 __entry->orig_len, __entry->orig_logical,
+                 __entry->goal_group, __entry->goal_start,
+                 __entry->goal_len, __entry->goal_logical,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical,
+                 __entry->found, __entry->groups, __entry->cr,
+                 __entry->flags, __entry->tail,
+                 /* "broken": buddy chunk size 2^buddy, or 0 when no buddy order */
+                 __entry->buddy ? 1 << __entry->buddy : 0)
+);
+
+/*
+ * Multi-block allocator preallocation hit: compares the original request
+ * extent (ac_o_ex) with the best-found extent (ac_b_ex), each printed as
+ * group/start/len@logical.
+ */
+TRACE_EVENT(ext4_mballoc_prealloc,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  orig_logical            )
+               __field(          int,  orig_start              )
+               __field(        __u32,  orig_group              )
+               __field(          int,  orig_len                )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->orig_logical   = ac->ac_o_ex.fe_logical;
+               __entry->orig_start     = ac->ac_o_ex.fe_start;
+               __entry->orig_group     = ac->ac_o_ex.fe_group;
+               __entry->orig_len       = ac->ac_o_ex.fe_len;
+               __entry->result_logical = ac->ac_b_ex.fe_logical;
+               __entry->result_start   = ac->ac_b_ex.fe_start;
+               __entry->result_group   = ac->ac_b_ex.fe_group;
+               __entry->result_len     = ac->ac_b_ex.fe_len;
+       ),
+
+       TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->orig_group, __entry->orig_start,
+                 __entry->orig_len, __entry->orig_logical,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical)
+);
+
+/*
+ * Shared class for mballoc discard/free events: one extent
+ * (group/start/len) on a device, optionally tied to an inode.
+ * A NULL inode is recorded as ino 0.
+ */
+DECLARE_EVENT_CLASS(ext4__mballoc,
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, inode, group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = sb->s_dev;
+               __entry->ino            = inode ? inode->i_ino : 0;
+               __entry->result_start   = start;
+               __entry->result_group   = group;
+               __entry->result_len     = len;
+       ),
+
+       TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len)
+);
+
+/* Instance of ext4__mballoc for a discarded extent. */
+DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
+
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, inode, group, start, len)
+);
+
+/* Instance of ext4__mballoc for a freed extent. */
+DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
+
+       TP_PROTO(struct super_block *sb,
+                struct inode *inode,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, inode, group, start, len)
+);
+
+/*
+ * Records a block being forgotten (revoked from the journal's view):
+ * inode identity, the block number, whether it is metadata, and the
+ * inode mode.
+ */
+TRACE_EVENT(ext4_forget,
+       TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
+
+       TP_ARGS(inode, is_metadata, block),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  block                   )
+               __field(        int,    is_metadata             )
+               __field(        __u16,  mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->block  = block;
+               __entry->is_metadata = is_metadata;
+               __entry->mode   = inode->i_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->is_metadata, __entry->block)
+);
+
+/*
+ * Delayed-allocation reservation update: snapshots i_blocks, the number
+ * of blocks consumed, the inode's reserved data/meta and allocated meta
+ * counters, and whether quota is being claimed.
+ */
+TRACE_EVENT(ext4_da_update_reserve_space,
+       TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
+
+       TP_ARGS(inode, used_blocks, quota_claim),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  i_blocks                )
+               __field(        int,    used_blocks             )
+               __field(        int,    reserved_data_blocks    )
+               __field(        int,    reserved_meta_blocks    )
+               __field(        int,    allocated_meta_blocks   )
+               __field(        int,    quota_claim             )
+               __field(        __u16,  mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->i_blocks = inode->i_blocks;
+               __entry->used_blocks = used_blocks;
+               __entry->reserved_data_blocks =
+                               EXT4_I(inode)->i_reserved_data_blocks;
+               __entry->reserved_meta_blocks =
+                               EXT4_I(inode)->i_reserved_meta_blocks;
+               __entry->allocated_meta_blocks =
+                               EXT4_I(inode)->i_allocated_meta_blocks;
+               __entry->quota_claim = quota_claim;
+               __entry->mode   = inode->i_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d "
+                 "allocated_meta_blocks %d quota_claim %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->used_blocks, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
+                 __entry->quota_claim)
+);
+
+/*
+ * Delayed-allocation space reservation: records i_blocks, how many
+ * metadata blocks are needed, and the current reserved data/meta counters.
+ */
+TRACE_EVENT(ext4_da_reserve_space,
+       TP_PROTO(struct inode *inode, int md_needed),
+
+       TP_ARGS(inode, md_needed),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  i_blocks                )
+               __field(        int,    md_needed               )
+               __field(        int,    reserved_data_blocks    )
+               __field(        int,    reserved_meta_blocks    )
+               __field(        __u16,  mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->i_blocks = inode->i_blocks;
+               __entry->md_needed = md_needed;
+               __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+               __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+               __entry->mode   = inode->i_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->md_needed, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks)
+);
+
+/*
+ * Delayed-allocation space release: records i_blocks, how many blocks
+ * were freed, and the reserved data/meta plus allocated meta counters.
+ */
+TRACE_EVENT(ext4_da_release_space,
+       TP_PROTO(struct inode *inode, int freed_blocks),
+
+       TP_ARGS(inode, freed_blocks),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  i_blocks                )
+               __field(        int,    freed_blocks            )
+               __field(        int,    reserved_data_blocks    )
+               __field(        int,    reserved_meta_blocks    )
+               __field(        int,    allocated_meta_blocks   )
+               __field(        __u16,  mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->i_blocks = inode->i_blocks;
+               __entry->freed_blocks = freed_blocks;
+               __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+               __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+               __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+               __entry->mode   = inode->i_mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
+                 "reserved_data_blocks %d reserved_meta_blocks %d "
+                 "allocated_meta_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->mode, __entry->i_blocks,
+                 __entry->freed_blocks, __entry->reserved_data_blocks,
+                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+/*
+ * Shared class for bitmap-load events: just the device and the block
+ * group whose bitmap is being read.
+ */
+DECLARE_EVENT_CLASS(ext4__bitmap_load,
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u32,  group                   )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->group  = group;
+       ),
+
+       TP_printk("dev %d,%d group %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->group)
+);
+
+/* mballoc loading a group's block bitmap. */
+DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+);
+
+/* mballoc loading a group's buddy bitmap. */
+DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+);
+
+/* Reading a group's block allocation bitmap from disk. */
+DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+);
+
+/* Loading a group's inode allocation bitmap. */
+DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
+
+       TP_PROTO(struct super_block *sb, unsigned long group),
+
+       TP_ARGS(sb, group)
+);
+
+/*
+ * Entry of a direct I/O request: inode, file position, length, and the
+ * read/write direction flag.
+ */
+TRACE_EVENT(ext4_direct_IO_enter,
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+
+       TP_ARGS(inode, offset, len, rw),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned long,  len             )
+               __field(        int,    rw                      )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = offset;
+               __entry->len    = len;
+               __entry->rw     = rw;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len, __entry->rw)
+);
+
+/* Exit of a direct I/O request: same identity as the enter event plus the return code. */
+TRACE_EVENT(ext4_direct_IO_exit,
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+                int rw, int ret),
+
+       TP_ARGS(inode, offset, len, rw, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned long,  len             )
+               __field(        int,    rw                      )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = offset;
+               __entry->len    = len;
+               __entry->rw     = rw;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->len,
+                 __entry->rw, __entry->ret)
+);
+
+/* Entry of fallocate: inode, requested offset and length, and the fallocate mode. */
+TRACE_EVENT(ext4_fallocate_enter,
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+       TP_ARGS(inode, offset, len, mode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        loff_t, len                     )
+               __field(        int,    mode                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = offset;
+               __entry->len    = len;
+               __entry->mode   = mode;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->pos,
+                 __entry->len, __entry->mode)
+);
+
+/* Exit of fallocate: offset, the block count worked on, and the return code. */
+TRACE_EVENT(ext4_fallocate_exit,
+       TP_PROTO(struct inode *inode, loff_t offset,
+                unsigned int max_blocks, int ret),
+
+       TP_ARGS(inode, offset, max_blocks, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, pos                     )
+               __field(        unsigned int,   blocks          )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = offset;
+               __entry->blocks = max_blocks;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->pos, __entry->blocks,
+                 __entry->ret)
+);
+
+/*
+ * Entry of unlink: the victim inode (via the dentry), its size, and the
+ * parent directory's inode number.
+ */
+TRACE_EVENT(ext4_unlink_enter,
+       TP_PROTO(struct inode *parent, struct dentry *dentry),
+
+       TP_ARGS(parent, dentry),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        ino_t,  parent                  )
+               __field(        loff_t, size                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = dentry->d_inode->i_sb->s_dev;
+               __entry->ino            = dentry->d_inode->i_ino;
+               __entry->parent         = parent->i_ino;
+               __entry->size           = dentry->d_inode->i_size;
+       ),
+
+       TP_printk("dev %d,%d ino %lu size %lld parent %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->size,
+                 (unsigned long) __entry->parent)
+);
+
+/* Exit of unlink: the dentry's inode identity plus the return code. */
+TRACE_EVENT(ext4_unlink_exit,
+       TP_PROTO(struct dentry *dentry, int ret),
+
+       TP_ARGS(dentry, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        int,    ret                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = dentry->d_inode->i_sb->s_dev;
+               __entry->ino            = dentry->d_inode->i_ino;
+               __entry->ret            = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->ret)
+);
+
+/* Shared class for truncate enter/exit: inode identity and its current i_blocks. */
+DECLARE_EVENT_CLASS(ext4__truncate,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        __u64,          blocks          )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->blocks = inode->i_blocks;
+       ),
+
+       TP_printk("dev %d,%d ino %lu blocks %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->blocks)
+);
+
+/* Truncate begins. */
+DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+);
+
+/* Truncate completes. */
+DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
+
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode)
+);
+
+/*
+ * Entry of converting an uninitialized extent to initialized.
+ * 'ux' is the uninitialized extent being converted; 'map' is the block
+ * mapping request (m_lblk/m_len) that triggered the conversion.
+ */
+TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                struct ext4_extent *ux),
+
+       TP_ARGS(inode, map, ux),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    m_lblk  )
+               __field(        unsigned,       m_len   )
+               __field(        ext4_lblk_t,    u_lblk  )
+               __field(        unsigned,       u_len   )
+               __field(        ext4_fsblk_t,   u_pblk  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->m_lblk         = map->m_lblk;
+               __entry->m_len          = map->m_len;
+               /* on-disk extent fields are little-endian; convert for the record */
+               __entry->u_lblk         = le32_to_cpu(ux->ee_block);
+               __entry->u_len          = ext4_ext_get_actual_len(ux);
+               __entry->u_pblk         = ext4_ext_pblock(ux);
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
+                 "u_pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->m_lblk, __entry->m_len,
+                 __entry->u_lblk, __entry->u_len, __entry->u_pblk)
+);
+
+/*
+ * Fast-path conversion of an uninitialized extent.
+ * 'ux' is the uninitialized extent.
+ * 'ix' is the initialized extent to which blocks are transferred.
+ * Records both extents (lblk/len/pblk) plus the triggering map request.
+ */
+TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                struct ext4_extent *ux, struct ext4_extent *ix),
+
+       TP_ARGS(inode, map, ux, ix),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    m_lblk  )
+               __field(        unsigned,       m_len   )
+               __field(        ext4_lblk_t,    u_lblk  )
+               __field(        unsigned,       u_len   )
+               __field(        ext4_fsblk_t,   u_pblk  )
+               __field(        ext4_lblk_t,    i_lblk  )
+               __field(        unsigned,       i_len   )
+               __field(        ext4_fsblk_t,   i_pblk  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->m_lblk         = map->m_lblk;
+               __entry->m_len          = map->m_len;
+               __entry->u_lblk         = le32_to_cpu(ux->ee_block);
+               __entry->u_len          = ext4_ext_get_actual_len(ux);
+               __entry->u_pblk         = ext4_ext_pblock(ux);
+               __entry->i_lblk         = le32_to_cpu(ix->ee_block);
+               __entry->i_len          = ext4_ext_get_actual_len(ix);
+               __entry->i_pblk         = ext4_ext_pblock(ix);
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u "
+                 "u_lblk %u u_len %u u_pblk %llu "
+                 "i_lblk %u i_len %u i_pblk %llu ",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->m_lblk, __entry->m_len,
+                 __entry->u_lblk, __entry->u_len, __entry->u_pblk,
+                 __entry->i_lblk, __entry->i_len, __entry->i_pblk)
+);
+
+/*
+ * Shared class for block-mapping entry events (extent and indirect paths):
+ * inode, starting logical block, length, and mapping flags.
+ */
+DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                unsigned int len, unsigned int flags),
+
+       TP_ARGS(inode, lblk, len, flags),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   flags           )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+               __entry->flags  = flags;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->lblk, __entry->len, __entry->flags)
+);
+
+/* Extent-tree mapping path entry. */
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                unsigned len, unsigned flags),
+
+       TP_ARGS(inode, lblk, len, flags)
+);
+
+/* Indirect-block mapping path entry. */
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                unsigned len, unsigned flags),
+
+       TP_ARGS(inode, lblk, len, flags)
+);
+
+/*
+ * Shared class for block-mapping exit events: logical-to-physical result
+ * (lblk -> pblk, len) and the mapping call's return value.
+ */
+DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                ext4_fsblk_t pblk, unsigned int len, int ret),
+
+       TP_ARGS(inode, lblk, pblk, len, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        unsigned int,   len             )
+               __field(        int,            ret             )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pblk   = pblk;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->lblk, __entry->pblk,
+                 __entry->len, __entry->ret)
+);
+
+DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                ext4_fsblk_t pblk, unsigned len, int ret),
+
+       TP_ARGS(inode, lblk, pblk, len, ret)
+);
+
+DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+                ext4_fsblk_t pblk, unsigned len, int ret),
+
+       TP_ARGS(inode, lblk, pblk, len, ret)
+);
+
+/*
+ * Fired when an extent-tree block is loaded; records the logical block
+ * being looked up and the physical block of the tree node read.
+ */
+TRACE_EVENT(ext4_ext_load_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk),
+
+       TP_ARGS(inode, lblk, pblk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        ext4_lblk_t,    lblk            )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pblk   = pblk;
+               __entry->lblk   = lblk;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->lblk, __entry->pblk)
+);
+
+/*
+ * Fired when an inode is loaded; records only device and inode number.
+ *
+ * NOTE(review): the format specifier was corrected from "%ld" to "%lu" --
+ * the argument is cast to unsigned long, so "%ld" was a signed/unsigned
+ * printf mismatch (every other event here prints ino with "%lu").  This
+ * intentionally diverges from the upstream snapshot this header mirrors.
+ */
+TRACE_EVENT(ext4_load_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev             )
+               __field(        ino_t,  ino             )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+       ),
+
+       TP_printk("dev %d,%d ino %lu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino)
+);
+
+/*
+ * Fired when a journal handle is started; IP is the caller's return
+ * address, printed symbolically via %pF.
+ */
+TRACE_EVENT(ext4_journal_start,
+       TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
+
+       TP_ARGS(sb, nblocks, IP),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(unsigned long,  ip                      )
+               __field(        int,    nblocks                 )
+       ),
+
+       TP_fast_assign(
+               __entry->dev     = sb->s_dev;
+               __entry->ip      = IP;
+               __entry->nblocks = nblocks;
+       ),
+
+       TP_printk("dev %d,%d nblocks %d caller %pF",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->nblocks, (void *)__entry->ip)
+);
+
+/*
+ * Event class for FITRIM-related events.  Unlike the other classes in this
+ * file it stores major/minor as two separate ints rather than one dev_t.
+ */
+DECLARE_EVENT_CLASS(ext4__trim,
+       TP_PROTO(struct super_block *sb,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        int,    dev_major               )
+               __field(        int,    dev_minor               )
+               __field(        __u32,  group                   )
+               __field(        int,    start                   )
+               __field(        int,    len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev_major      = MAJOR(sb->s_dev);
+               __entry->dev_minor      = MINOR(sb->s_dev);
+               __entry->group          = group;
+               __entry->start          = start;
+               __entry->len            = len;
+       ),
+
+       TP_printk("dev %d,%d group %u, start %d, len %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->group, __entry->start, __entry->len)
+);
+
+/* Trim of one extent within a block group. */
+DEFINE_EVENT(ext4__trim, ext4_trim_extent,
+
+       TP_PROTO(struct super_block *sb,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, group, start, len)
+);
+
+/* Trim of all free space within a block group. */
+DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
+
+       TP_PROTO(struct super_block *sb,
+                ext4_group_t group,
+                ext4_grpblk_t start,
+                ext4_grpblk_t len),
+
+       TP_ARGS(sb, group, start, len)
+);
+
+/*
+ * Fired when handling an uninitialized (unwritten) extent; snapshots the
+ * map request plus the allocated length and new block.
+ *
+ * NOTE(review): a space was added at the end of the first format-string
+ * literal -- the adjacent literals previously concatenated to
+ * "flags %dallocated %d", running the two fields together.  This
+ * intentionally diverges from the upstream snapshot this header mirrors.
+ */
+TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                unsigned int allocated, ext4_fsblk_t newblock),
+
+       TP_ARGS(inode, map, allocated, newblock),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        int,            flags           )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   allocated       )
+               __field(        ext4_fsblk_t,   newblk          )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->flags          = map->m_flags;
+               __entry->lblk           = map->m_lblk;
+               __entry->pblk           = map->m_pblk;
+               __entry->len            = map->m_len;
+               __entry->allocated      = allocated;
+               __entry->newblk         = newblock;
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d "
+                 "allocated %d newblock %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->len, __entry->flags,
+                 (unsigned int) __entry->allocated,
+                 (unsigned long long) __entry->newblk)
+);
+
+/*
+ * Fired on return from implied-cluster allocation; records the resulting
+ * map state (flags, lblk, pblk, len) and the return code.  No inode is
+ * available here, so only the device is recorded.
+ */
+TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
+       TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
+
+       TP_ARGS(sb, map, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        unsigned int,   flags   )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        ext4_fsblk_t,   pblk    )
+               __field(        unsigned int,   len     )
+               __field(        int,            ret     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->flags  = map->m_flags;
+               __entry->lblk   = map->m_lblk;
+               __entry->pblk   = map->m_pblk;
+               __entry->len    = map->m_len;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->len, __entry->flags, __entry->ret)
+);
+
+/*
+ * Fired when an extent is inserted into the inode's extent cache;
+ * records the cached (lblk, len, start-pblk) triple.
+ */
+TRACE_EVENT(ext4_ext_put_in_cache,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
+                ext4_fsblk_t start),
+
+       TP_ARGS(inode, lblk, len, start),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned int,   len     )
+               __field(        ext4_fsblk_t,   start   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+               __entry->start  = start;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->len,
+                 (unsigned long long) __entry->start)
+);
+
+/*
+ * Fired on an extent-cache lookup; ret reports the lookup outcome for
+ * the given logical block.
+ */
+TRACE_EVENT(ext4_ext_in_cache,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
+
+       TP_ARGS(inode, lblk, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        int,            ret     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->lblk   = lblk;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->ret)
+
+);
+
+/*
+ * Fired when scanning [from, to] for a delayed-allocation range;
+ * "reverse" marks scan direction, "found"/"found_blk" report the result.
+ */
+TRACE_EVENT(ext4_find_delalloc_range,
+       TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
+               int reverse, int found, ext4_lblk_t found_blk),
+
+       TP_ARGS(inode, from, to, reverse, found, found_blk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    from            )
+               __field(        ext4_lblk_t,    to              )
+               __field(        int,            reverse         )
+               __field(        int,            found           )
+               __field(        ext4_lblk_t,    found_blk       )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->from           = from;
+               __entry->to             = to;
+               __entry->reverse        = reverse;
+               __entry->found          = found;
+               __entry->found_blk      = found_blk;
+       ),
+
+       TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
+                 "(blk = %u)",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->from, (unsigned) __entry->to,
+                 __entry->reverse, __entry->found,
+                 (unsigned) __entry->found_blk)
+);
+
+/*
+ * Fired when reserved clusters are consumed for the range [lblk, lblk+len).
+ */
+TRACE_EVENT(ext4_get_reserved_cluster_alloc,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
+
+       TP_ARGS(inode, lblk, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned int,   len     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->len)
+);
+
+/*
+ * Fired to dump a single extent (logical block, physical block, length).
+ */
+TRACE_EVENT(ext4_ext_show_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+                unsigned short len),
+
+       TP_ARGS(inode, lblk, pblk, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_fsblk_t,   pblk    )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned short, len     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pblk   = pblk;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 (unsigned long long) __entry->pblk,
+                 (unsigned short) __entry->len)
+);
+
+/*
+ * Fired when removing the blocks of extent "ex" within [from, to];
+ * snapshots the extent (lblk, pblk, len) plus the partial-cluster state.
+ *
+ * NOTE(review): two corrections vs. the upstream snapshot this header
+ * mirrors: (1) ee_lblk is read with le32_to_cpu(), not cpu_to_le32() --
+ * ee_block is an on-disk little-endian field, and the sibling
+ * ext4_ext_rm_leaf event already reads it with le32_to_cpu(); (2) a space
+ * was added at the end of the first format literal, which previously
+ * concatenated to "...%u]from %u".
+ */
+TRACE_EVENT(ext4_remove_blocks,
+           TP_PROTO(struct inode *inode, struct ext4_extent *ex,
+               ext4_lblk_t from, ext4_fsblk_t to,
+               ext4_fsblk_t partial_cluster),
+
+       TP_ARGS(inode, ex, from, to, partial_cluster),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    from    )
+               __field(        ext4_lblk_t,    to      )
+               __field(        ext4_fsblk_t,   partial )
+               __field(        ext4_fsblk_t,   ee_pblk )
+               __field(        ext4_lblk_t,    ee_lblk )
+               __field(        unsigned short, ee_len  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->from           = from;
+               __entry->to             = to;
+               __entry->partial        = partial_cluster;
+               __entry->ee_pblk        = ext4_ext_pblock(ex);
+               __entry->ee_lblk        = le32_to_cpu(ex->ee_block);
+               __entry->ee_len         = ext4_ext_get_actual_len(ex);
+       ),
+
+       TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u] "
+                 "from %u to %u partial_cluster %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->ee_lblk,
+                 (unsigned long long) __entry->ee_pblk,
+                 (unsigned short) __entry->ee_len,
+                 (unsigned) __entry->from,
+                 (unsigned) __entry->to,
+                 (unsigned) __entry->partial)
+);
+
+/*
+ * Fired when removing entries from an extent-tree leaf; records the last
+ * extent examined and the partial-cluster state.
+ *
+ * NOTE(review): a space was added at the end of the first format literal,
+ * which previously concatenated to "...%u]partial_cluster %u".  This
+ * intentionally diverges from the upstream snapshot this header mirrors.
+ */
+TRACE_EVENT(ext4_ext_rm_leaf,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start,
+                struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
+
+       TP_ARGS(inode, start, ex, partial_cluster),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_fsblk_t,   partial )
+               __field(        ext4_lblk_t,    start   )
+               __field(        ext4_lblk_t,    ee_lblk )
+               __field(        ext4_fsblk_t,   ee_pblk )
+               __field(        short,          ee_len  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->partial        = partial_cluster;
+               __entry->start          = start;
+               __entry->ee_lblk        = le32_to_cpu(ex->ee_block);
+               __entry->ee_pblk        = ext4_ext_pblock(ex);
+               __entry->ee_len         = ext4_ext_get_actual_len(ex);
+       ),
+
+       TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u] "
+                 "partial_cluster %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 (unsigned) __entry->ee_lblk,
+                 (unsigned long long) __entry->ee_pblk,
+                 (unsigned short) __entry->ee_len,
+                 (unsigned) __entry->partial)
+);
+
+/*
+ * Fired when removing an extent-tree index block at physical block pblk.
+ */
+TRACE_EVENT(ext4_ext_rm_idx,
+       TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
+
+       TP_ARGS(inode, pblk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_fsblk_t,   pblk    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pblk   = pblk;
+       ),
+
+       TP_printk("dev %d,%d ino %lu index_pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long long) __entry->pblk)
+);
+
+/*
+ * Fired at the start of extent-tree space removal from logical block
+ * "start" onward; depth is the current tree depth.
+ */
+TRACE_EVENT(ext4_ext_remove_space,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+
+       TP_ARGS(inode, start, depth),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ino_t,          ino     )
+               __field(        ext4_lblk_t,    start   )
+               __field(        int,            depth   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->start  = start;
+               __entry->depth  = depth;
+       ),
+
+       TP_printk("dev %d,%d ino %lu since %u depth %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 __entry->depth)
+);
+
+/*
+ * Companion to ext4_ext_remove_space, fired on completion; adds the
+ * remaining partial cluster and the number of header entries left.
+ */
+TRACE_EVENT(ext4_ext_remove_space_done,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
+               ext4_lblk_t partial, unsigned short eh_entries),
+
+       TP_ARGS(inode, start, depth, partial, eh_entries),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    start           )
+               __field(        int,            depth           )
+               __field(        ext4_lblk_t,    partial         )
+               __field(        unsigned short, eh_entries      )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->start          = start;
+               __entry->depth          = depth;
+               __entry->partial        = partial;
+               __entry->eh_entries     = eh_entries;
+       ),
+
+       TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+                 "remaining_entries %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 __entry->depth,
+                 (unsigned) __entry->partial,
+                 (unsigned short) __entry->eh_entries)
+);
+
+#endif /* _TRACE_EXT4_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/printk.h b/instrumentation/events/mainline/printk.h
new file mode 100644 (file)
index 0000000..94ec79c
--- /dev/null
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Emitted for console output; the TP_CONDITION suppresses the event when
+ * the [start, end) range is empty.  The assign copies that byte range out
+ * of the printk ring buffer into a dynamic array: indices are masked with
+ * (log_buf_len - 1), so when the masked start exceeds the masked end the
+ * range wraps and is copied in two chunks (tail of the buffer, then the
+ * head).  The masking assumes log_buf_len is a power of two -- TODO
+ * confirm at the call site.  The copy is NUL-terminated before printing.
+ */
+TRACE_EVENT_CONDITION(console,
+       TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+                unsigned log_buf_len),
+
+       TP_ARGS(log_buf, start, end, log_buf_len),
+
+       TP_CONDITION(start != end),
+
+       TP_STRUCT__entry(
+               __dynamic_array(char, msg, end - start + 1)
+       ),
+
+       TP_fast_assign(
+               if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
+                       /* Range wraps: copy buffer tail, then buffer head. */
+                       memcpy(__get_dynamic_array(msg),
+                              log_buf + (start & (log_buf_len - 1)),
+                              log_buf_len - (start & (log_buf_len - 1)));
+                       memcpy((char *)__get_dynamic_array(msg) +
+                              log_buf_len - (start & (log_buf_len - 1)),
+                              log_buf, end & (log_buf_len - 1));
+               } else
+                       /* Contiguous range: single copy. */
+                       memcpy(__get_dynamic_array(msg),
+                              log_buf + (start & (log_buf_len - 1)),
+                              end - start);
+               ((char *)__get_dynamic_array(msg))[end - start] = 0;
+       ),
+
+       TP_printk("%s", __get_str(msg))
+);
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/random.h b/instrumentation/events/mainline/random.h
new file mode 100644 (file)
index 0000000..422df19
--- /dev/null
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM random
+
+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RANDOM_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Class for pool-mixing events.  Note that only the pool_name POINTER is
+ * recorded, not the string contents; IP is the caller's return address,
+ * printed symbolically via %pF.
+ */
+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
+       TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+       TP_ARGS(pool_name, bytes, IP),
+
+       TP_STRUCT__entry(
+               __field( const char *,  pool_name               )
+               __field(          int,  bytes                   )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               __entry->pool_name      = pool_name;
+               __entry->bytes          = bytes;
+               __entry->IP             = IP;
+       ),
+
+       TP_printk("%s pool: bytes %d caller %pF",
+                 __entry->pool_name, __entry->bytes, (void *)__entry->IP)
+);
+
+/* Mixing performed with the pool lock held. */
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+       TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+       TP_ARGS(pool_name, bytes, IP)
+);
+
+/* Mixing performed without taking the pool lock. */
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+       TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+       TP_ARGS(pool_name, bytes, IP)
+);
+
+/*
+ * Fired when entropy is credited to a pool: "bits" added this call,
+ * "entropy_count"/"entropy_total" pool counters, IP the caller (%pF).
+ * Only the pool_name pointer is stored, not the string contents.
+ */
+TRACE_EVENT(credit_entropy_bits,
+       TP_PROTO(const char *pool_name, int bits, int entropy_count,
+                int entropy_total, unsigned long IP),
+
+       TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+
+       TP_STRUCT__entry(
+               __field( const char *,  pool_name               )
+               __field(          int,  bits                    )
+               __field(          int,  entropy_count           )
+               __field(          int,  entropy_total           )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               __entry->pool_name      = pool_name;
+               __entry->bits           = bits;
+               __entry->entropy_count  = entropy_count;
+               __entry->entropy_total  = entropy_total;
+               __entry->IP             = IP;
+       ),
+
+       TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
+                 "caller %pF", __entry->pool_name, __entry->bits,
+                 __entry->entropy_count, __entry->entropy_total,
+                 (void *)__entry->IP)
+);
+
+/*
+ * Fired on a get_random_bytes() request; records the byte count and the
+ * caller's return address (printed via %pF).
+ */
+TRACE_EVENT(get_random_bytes,
+       TP_PROTO(int nbytes, unsigned long IP),
+
+       TP_ARGS(nbytes, IP),
+
+       TP_STRUCT__entry(
+               __field(          int,  nbytes                  )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               __entry->nbytes         = nbytes;
+               __entry->IP             = IP;
+       ),
+
+       TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+);
+
+/*
+ * Class for entropy-extraction events.  Only the pool_name pointer is
+ * stored, not the string contents; IP is the caller's return address.
+ */
+DECLARE_EVENT_CLASS(random__extract_entropy,
+       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+                unsigned long IP),
+
+       TP_ARGS(pool_name, nbytes, entropy_count, IP),
+
+       TP_STRUCT__entry(
+               __field( const char *,  pool_name               )
+               __field(          int,  nbytes                  )
+               __field(          int,  entropy_count           )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               __entry->pool_name      = pool_name;
+               __entry->nbytes         = nbytes;
+               __entry->entropy_count  = entropy_count;
+               __entry->IP             = IP;
+       ),
+
+       TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+                 __entry->pool_name, __entry->nbytes, __entry->entropy_count,
+                 (void *)__entry->IP)
+);
+
+
+/* Extraction on behalf of an in-kernel consumer. */
+DEFINE_EVENT(random__extract_entropy, extract_entropy,
+       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+                unsigned long IP),
+
+       TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+/* Extraction on behalf of a userspace read. */
+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+                unsigned long IP),
+
+       TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+
+
+#endif /* _TRACE_RANDOM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/rcu.h b/instrumentation/events/mainline/rcu.h
new file mode 100644 (file)
index 0000000..5bde94d
--- /dev/null
@@ -0,0 +1,618 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rcu
+
+#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RCU_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for start/end markers used for utilization calculations.
+ * By convention, the string is of the following forms:
+ *
+ * "Start <activity>" -- Mark the start of the specified activity,
+ *                      such as "context switch".  Nesting is permitted.
+ * "End <activity>" -- Mark the end of the specified activity.
+ *
+ * An "@" character within "<activity>" is a comment character: Data
+ * reduction scripts will ignore the "@" and the remainder of the line.
+ */
+TRACE_EVENT(rcu_utilization,
+
+       TP_PROTO(char *s),
+
+       TP_ARGS(s),
+
+       TP_STRUCT__entry(
+               /* NOTE(review): only the pointer is stored, not a copy;
+                * "s" must outlive the trace (string literals by the
+                * convention documented above). */
+               __field(char *, s)
+       ),
+
+       TP_fast_assign(
+               __entry->s = s;
+       ),
+
+       TP_printk("%s", __entry->s)
+);
+
+#ifdef CONFIG_RCU_TRACE
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+
+/*
+ * Tracepoint for grace-period events: starting and ending a grace
+ * period ("start" and "end", respectively), a CPU noting the start
+ * of a new grace period or the end of an old grace period ("cpustart"
+ * and "cpuend", respectively), a CPU passing through a quiescent
+ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
+ * and "cpuofl", respectively), and a CPU being kicked for being too
+ * long in dyntick-idle mode ("kick").
+ */
+/*
+ * NOTE(review): throughout this header, "char *" event fields record the
+ * caller's string pointer rather than copying the string; callers are
+ * expected to pass string literals.
+ */
+TRACE_EVENT(rcu_grace_period,
+
+       TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+
+       TP_ARGS(rcuname, gpnum, gpevent),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(unsigned long, gpnum)
+               __field(char *, gpevent)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpnum = gpnum;
+               __entry->gpevent = gpevent;
+       ),
+
+       TP_printk("%s %lu %s",
+                 __entry->rcuname, __entry->gpnum, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for grace-period-initialization events.  These are
+ * distinguished by the type of RCU, the new grace-period number, the
+ * rcu_node structure level, the starting and ending CPU covered by the
+ * rcu_node structure, and the mask of CPUs that will be waited for.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_grace_period_init,
+
+       TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+                int grplo, int grphi, unsigned long qsmask),
+
+       TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(unsigned long, gpnum)
+               __field(u8, level)
+               __field(int, grplo)
+               __field(int, grphi)
+               __field(unsigned long, qsmask)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpnum = gpnum;
+               __entry->level = level;
+               __entry->grplo = grplo;
+               __entry->grphi = grphi;
+               __entry->qsmask = qsmask;
+       ),
+
+       TP_printk("%s %lu %u %d %d %lx",
+                 __entry->rcuname, __entry->gpnum, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->qsmask)
+);
+
+/*
+ * Tracepoint for tasks blocking within preemptible-RCU read-side
+ * critical sections.  Track the type of RCU (which one day might
+ * include SRCU), the grace-period number that the task is blocking
+ * (the current or the next), and the task's PID.
+ */
+TRACE_EVENT(rcu_preempt_task,
+
+       TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+
+       TP_ARGS(rcuname, pid, gpnum),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(unsigned long, gpnum)
+               __field(int, pid)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpnum = gpnum;
+               __entry->pid = pid;
+       ),
+
+       TP_printk("%s %lu %d",
+                 __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for tasks that blocked within a given preemptible-RCU
+ * read-side critical section exiting that critical section.  Track the
+ * type of RCU (which one day might include SRCU) and the task's PID.
+ */
+TRACE_EVENT(rcu_unlock_preempted_task,
+
+       TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+
+       TP_ARGS(rcuname, gpnum, pid),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(unsigned long, gpnum)
+               __field(int, pid)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpnum = gpnum;
+               __entry->pid = pid;
+       ),
+
+       TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for quiescent-state-reporting events.  These are
+ * distinguished by the type of RCU, the grace-period number, the
+ * mask of quiescent lower-level entities, the rcu_node structure level,
+ * the starting and ending CPU covered by the rcu_node structure, and
+ * whether there are any blocked tasks blocking the current grace period.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_quiescent_state_report,
+
+       TP_PROTO(char *rcuname, unsigned long gpnum,
+                unsigned long mask, unsigned long qsmask,
+                u8 level, int grplo, int grphi, int gp_tasks),
+
+       TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(unsigned long, gpnum)
+               __field(unsigned long, mask)
+               __field(unsigned long, qsmask)
+               __field(u8, level)
+               __field(int, grplo)
+               __field(int, grphi)
+               /* NOTE(review): prototype passes int, stored truncated as u8. */
+               __field(u8, gp_tasks)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpnum = gpnum;
+               __entry->mask = mask;
+               __entry->qsmask = qsmask;
+               __entry->level = level;
+               __entry->grplo = grplo;
+               __entry->grphi = grphi;
+               __entry->gp_tasks = gp_tasks;
+       ),
+
+       TP_printk("%s %lu %lx>%lx %u %d %d %u",
+                 __entry->rcuname, __entry->gpnum,
+                 __entry->mask, __entry->qsmask, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->gp_tasks)
+);
+
+/*
+ * Tracepoint for quiescent states detected by force_quiescent_state().
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
+ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
+ * too long.
+ */
+TRACE_EVENT(rcu_fqs,
+
+       TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+
+       TP_ARGS(rcuname, gpnum, cpu, qsevent),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(unsigned long, gpnum)
+               __field(int, cpu)
+               __field(char *, qsevent)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpnum = gpnum;
+               __entry->cpu = cpu;
+               __entry->qsevent = qsevent;
+       ),
+
+       TP_printk("%s %lu %d %s",
+                 __entry->rcuname, __entry->gpnum,
+                 __entry->cpu, __entry->qsevent)
+);
+
+#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+
+/*
+ * Tracepoint for dyntick-idle entry/exit events.  These take a string
+ * as argument: "Start" for entering dyntick-idle mode, "End" for
+ * leaving it, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.  "Error on entry: not idle task" and "Error on
+ * exit: not idle task" indicate that a non-idle task is erroneously
+ * toying with the idle loop.
+ *
+ * These events also take a pair of numbers, which indicate the nesting
+ * depth before and after the event of interest.  Note that task-related
+ * events use the upper bits of each number, while interrupt-related
+ * events use the lower bits.
+ */
+TRACE_EVENT(rcu_dyntick,
+
+       TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
+
+       TP_ARGS(polarity, oldnesting, newnesting),
+
+       TP_STRUCT__entry(
+               __field(char *, polarity)
+               __field(long long, oldnesting)
+               __field(long long, newnesting)
+       ),
+
+       TP_fast_assign(
+               __entry->polarity = polarity;
+               __entry->oldnesting = oldnesting;
+               __entry->newnesting = newnesting;
+       ),
+
+       TP_printk("%s %llx %llx", __entry->polarity,
+                 __entry->oldnesting, __entry->newnesting)
+);
+
+/*
+ * Tracepoint for RCU preparation for idle, the goal being to get RCU
+ * processing done so that the current CPU can shut off its scheduling
+ * clock and enter dyntick-idle mode.  One way to accomplish this is
+ * to drain all RCU callbacks from this CPU, and the other is to have
+ * done everything RCU requires for the current grace period.  In this
+ * latter case, the CPU will be awakened at the end of the current grace
+ * period in order to process the remainder of its callbacks.
+ *
+ * These tracepoints take a string as argument:
+ *
+ *     "No callbacks": Nothing to do, no callbacks on this CPU.
+ *     "In holdoff": Nothing to do, holding off after unsuccessful attempt.
+ *     "Begin holdoff": Attempt failed, don't retry until next jiffy.
+ *     "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *     "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
+ *     "More callbacks": Still more callbacks, try again to clear them out.
+ *     "Callbacks drained": All callbacks processed, off to dyntick idle!
+ *     "Timer": Timer fired to cause CPU to continue processing callbacks.
+ *     "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
+ *     "Cleanup after idle": Idle exited, timer canceled.
+ */
+TRACE_EVENT(rcu_prep_idle,
+
+       TP_PROTO(char *reason),
+
+       TP_ARGS(reason),
+
+       TP_STRUCT__entry(
+               __field(char *, reason)
+       ),
+
+       TP_fast_assign(
+               __entry->reason = reason;
+       ),
+
+       TP_printk("%s", __entry->reason)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback function.
+ * The first argument is the type of RCU, the second argument is
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
+ */
+TRACE_EVENT(rcu_callback,
+
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+                long qlen),
+
+       TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(void *, rhp)
+               __field(void *, func)
+               __field(long, qlen_lazy)
+               __field(long, qlen)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->rhp = rhp;
+               __entry->func = rhp->func;
+               __entry->qlen_lazy = qlen_lazy;
+               __entry->qlen = qlen;
+       ),
+
+       TP_printk("%s rhp=%p func=%pf %ld/%ld",
+                 __entry->rcuname, __entry->rhp, __entry->func,
+                 __entry->qlen_lazy, __entry->qlen)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback of the special
+ * kfree() form.  The first argument is the RCU type, the second argument
+ * is a pointer to the RCU callback, the third argument is the offset
+ * of the callback within the enclosing RCU-protected data structure,
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
+ */
+TRACE_EVENT(rcu_kfree_callback,
+
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+                long qlen_lazy, long qlen),
+
+       TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(void *, rhp)
+               __field(unsigned long, offset)
+               __field(long, qlen_lazy)
+               __field(long, qlen)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->rhp = rhp;
+               __entry->offset = offset;
+               __entry->qlen_lazy = qlen_lazy;
+               __entry->qlen = qlen;
+       ),
+
+       /* NOTE(review): "func=%ld" prints the byte offset, not a pointer. */
+       TP_printk("%s rhp=%p func=%ld %ld/%ld",
+                 __entry->rcuname, __entry->rhp, __entry->offset,
+                 __entry->qlen_lazy, __entry->qlen)
+);
+
+/*
+ * Tracepoint for marking the beginning rcu_do_batch, performed to start
+ * RCU callback invocation.  The first argument is the RCU flavor,
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
+ */
+TRACE_EVENT(rcu_batch_start,
+
+       TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
+
+       TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(long, qlen_lazy)
+               __field(long, qlen)
+               __field(int, blimit)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->qlen_lazy = qlen_lazy;
+               __entry->qlen = qlen;
+               __entry->blimit = blimit;
+       ),
+
+       TP_printk("%s CBs=%ld/%ld bl=%d",
+                 __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
+                 __entry->blimit)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback function.
+ * The first argument is the type of RCU, and the second argument is
+ * a pointer to the RCU callback itself.
+ */
+TRACE_EVENT(rcu_invoke_callback,
+
+       TP_PROTO(char *rcuname, struct rcu_head *rhp),
+
+       TP_ARGS(rcuname, rhp),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(void *, rhp)
+               __field(void *, func)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->rhp = rhp;
+               __entry->func = rhp->func;
+       ),
+
+       TP_printk("%s rhp=%p func=%pf",
+                 __entry->rcuname, __entry->rhp, __entry->func)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree() form.  The first argument is the RCU flavor, the second
+ * argument is a pointer to the RCU callback, and the third argument
+ * is the offset of the callback within the enclosing RCU-protected
+ * data structure.
+ */
+TRACE_EVENT(rcu_invoke_kfree_callback,
+
+       TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+
+       TP_ARGS(rcuname, rhp, offset),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(void *, rhp)
+               __field(unsigned long, offset)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->rhp = rhp;
+               __entry->offset = offset;
+       ),
+
+       TP_printk("%s rhp=%p func=%ld",
+                 __entry->rcuname, __entry->rhp, __entry->offset)
+);
+
+/*
+ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
+ * invoked.  The first argument is the name of the RCU flavor,
+ * the second argument is number of callbacks actually invoked,
+ * the third argument (cb) is whether or not any of the callbacks that
+ * were ready to invoke at the beginning of this batch are still
+ * queued, the fourth argument (nr) is the return value of need_resched(),
+ * the fifth argument (iit) is 1 if the current task is the idle task,
+ * and the sixth argument (risk) is the return value from
+ * rcu_is_callbacks_kthread().
+ */
+TRACE_EVENT(rcu_batch_end,
+
+       TP_PROTO(char *rcuname, int callbacks_invoked,
+                bool cb, bool nr, bool iit, bool risk),
+
+       TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(int, callbacks_invoked)
+               __field(bool, cb)
+               __field(bool, nr)
+               __field(bool, iit)
+               __field(bool, risk)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->callbacks_invoked = callbacks_invoked;
+               __entry->cb = cb;
+               __entry->nr = nr;
+               __entry->iit = iit;
+               __entry->risk = risk;
+       ),
+
+       /* Each flag prints as its letter when set, '.' when clear. */
+       TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
+                 __entry->rcuname, __entry->callbacks_invoked,
+                 __entry->cb ? 'C' : '.',
+                 __entry->nr ? 'S' : '.',
+                 __entry->iit ? 'I' : '.',
+                 __entry->risk ? 'R' : '.')
+);
+
+/*
+ * Tracepoint for rcutorture readers.  The first argument is the name
+ * of the RCU flavor from rcutorture's viewpoint and the second argument
+ * is the callback address.
+ */
+TRACE_EVENT(rcu_torture_read,
+
+       TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+
+       TP_ARGS(rcutorturename, rhp),
+
+       TP_STRUCT__entry(
+               __field(char *, rcutorturename)
+               __field(struct rcu_head *, rhp)
+       ),
+
+       TP_fast_assign(
+               __entry->rcutorturename = rcutorturename;
+               __entry->rhp = rhp;
+       ),
+
+       TP_printk("%s torture read %p",
+                 __entry->rcutorturename, __entry->rhp)
+);
+
+/*
+ * Tracepoint for _rcu_barrier() execution.  The string "s" describes
+ * the _rcu_barrier phase:
+ *     "Begin": rcu_barrier_callback() started.
+ *     "Check": rcu_barrier_callback() checking for piggybacking.
+ *     "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
+ *     "Inc1": rcu_barrier_callback() piggyback check counter incremented.
+ *     "Offline": rcu_barrier_callback() found offline CPU
+ *     "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
+ *     "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
+ *     "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *     "CB": An rcu_barrier_callback() invoked a callback, not the last.
+ *     "LastCB": An rcu_barrier_callback() invoked the last callback.
+ *     "Inc2": rcu_barrier_callback() piggyback check counter incremented.
+ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
+ * is the count of remaining callbacks, and "done" is the piggybacking count.
+ */
+TRACE_EVENT(rcu_barrier,
+
+       TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+
+       TP_ARGS(rcuname, s, cpu, cnt, done),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(char *, s)
+               __field(int, cpu)
+               __field(int, cnt)
+               __field(unsigned long, done)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->s = s;
+               __entry->cpu = cpu;
+               __entry->cnt = cnt;
+               __entry->done = done;
+       ),
+
+       TP_printk("%s %s cpu %d remaining %d # %lu",
+                 __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
+                 __entry->done)
+);
+
+#else /* #ifdef CONFIG_RCU_TRACE */
+
+/*
+ * CONFIG_RCU_TRACE=n: no-op stubs so callers need no conditional
+ * compilation.  Argument lists must stay in sync with the TRACE_EVENT()
+ * definitions above.
+ * NOTE(review): trace_rcu_barrier() names its first parameter "name"
+ * while the event above uses "rcuname" — harmless (parameters unused)
+ * but inconsistent.
+ */
+#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+                                   qsmask) do { } while (0)
+#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+                                        grplo, grphi, gp_tasks) do { } \
+       while (0)
+#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#define trace_rcu_prep_idle(reason) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
+       do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
+       do { } while (0)
+#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
+#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
+       do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#endif /* _TRACE_RCU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/regmap.h b/instrumentation/events/mainline/regmap.h
new file mode 100644 (file)
index 0000000..41a7dbd
--- /dev/null
@@ -0,0 +1,181 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct device;
+struct regmap;
+
+/*
+ * Log register events
+ */
+/*
+ * Single-register access: unlike the char* fields elsewhere, __string()
+ * copies the device name into the event record, so the event is safe
+ * even if the device goes away afterwards.
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        unsigned int,   reg             )
+               __field(        unsigned int,   val             )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, dev_name(dev));
+               __entry->reg = reg;
+               __entry->val = val;
+       ),
+
+       TP_printk("%s reg=%x val=%x", __get_str(name),
+                 (unsigned int)__entry->reg,
+                 (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+
+       TP_PROTO(struct device *dev, unsigned int reg,
+                unsigned int val),
+
+       TP_ARGS(dev, reg, val)
+
+);
+
+/*
+ * Block transfer: reg is the starting register; count is presumably the
+ * number of registers in the transfer — confirm against regmap core.
+ */
+DECLARE_EVENT_CLASS(regmap_block,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        unsigned int,   reg             )
+               __field(        int,            count           )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, dev_name(dev));
+               __entry->reg = reg;
+               __entry->count = count;
+       ),
+
+       TP_printk("%s reg=%x count=%d", __get_str(name),
+                 (unsigned int)__entry->reg,
+                 (int)__entry->count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+       TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+       TP_ARGS(dev, reg, count)
+);
+
+/*
+ * Cache sync event: "type" and "status" are free-form strings supplied
+ * by the regcache core; all three strings are copied into the record.
+ *
+ * Fix: dropped the stray "__field(int, type)" that duplicated the
+ * __string(type, ...) entry — it was never assigned in TP_fast_assign(),
+ * so it only recorded uninitialized ring-buffer garbage (this matches
+ * the later mainline cleanup of this header).
+ */
+TRACE_EVENT(regcache_sync,
+
+       TP_PROTO(struct device *dev, const char *type,
+                const char *status),
+
+       TP_ARGS(dev, type, status),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __string(       status,         status          )
+               __string(       type,           type            )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, dev_name(dev));
+               __assign_str(status, status);
+               __assign_str(type, type);
+       ),
+
+       TP_printk("%s type=%s status=%s", __get_str(name),
+                 __get_str(type), __get_str(status))
+);
+
+/* Cache-mode toggles: the bool flag is stored widened to int. */
+DECLARE_EVENT_CLASS(regmap_bool,
+
+       TP_PROTO(struct device *dev, bool flag),
+
+       TP_ARGS(dev, flag),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        int,            flag            )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, dev_name(dev));
+               __entry->flag = flag;
+       ),
+
+       TP_printk("%s flag=%d", __get_str(name),
+                 (int)__entry->flag)
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_only,
+
+       TP_PROTO(struct device *dev, bool flag),
+
+       TP_ARGS(dev, flag)
+
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
+
+       TP_PROTO(struct device *dev, bool flag),
+
+       TP_ARGS(dev, flag)
+
+);
+
+#endif /* _TRACE_REGMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/rpm.h b/instrumentation/events/mainline/rpm.h
new file mode 100644 (file)
index 0000000..33f85b6
--- /dev/null
@@ -0,0 +1,100 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct device;
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+/*
+ * Snapshots the device's runtime-PM bookkeeping (dev->power.*) at the
+ * time of the call, alongside the caller-supplied rpm flags.
+ * NOTE(review): "flags" is presumably the RPM_* flag word passed to the
+ * rpm_suspend/rpm_resume/rpm_idle internals — confirm against
+ * drivers/base/power/runtime.c.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev)   )
+               __field(        int,            flags           )
+               __field(        int ,           usage_count     )
+               __field(        int ,           disable_depth   )
+               __field(        int ,           runtime_auto    )
+               __field(        int ,           request_pending )
+               __field(        int ,           irq_safe        )
+               __field(        int ,           child_count     )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, dev_name(dev));
+               __entry->flags = flags;
+               __entry->usage_count = atomic_read(
+                       &dev->power.usage_count);
+               __entry->disable_depth = dev->power.disable_depth;
+               __entry->runtime_auto = dev->power.runtime_auto;
+               __entry->request_pending = dev->power.request_pending;
+               __entry->irq_safe = dev->power.irq_safe;
+               __entry->child_count = atomic_read(
+                       &dev->power.child_count);
+       ),
+
+       TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+                       " irq-%-1d child-%d",
+                       __get_str(name), __entry->flags,
+                       __entry->usage_count,
+                       __entry->disable_depth,
+                       __entry->runtime_auto,
+                       __entry->request_pending,
+                       __entry->irq_safe,
+                       __entry->child_count
+                )
+);
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+);
+
+/*
+ * Return value of a runtime-PM operation, attributed to its caller:
+ * "ip" is the caller's instruction pointer, printed symbolically (%pS).
+ */
+TRACE_EVENT(rpm_return_int,
+       TP_PROTO(struct device *dev, unsigned long ip, int ret),
+       TP_ARGS(dev, ip, ret),
+
+       TP_STRUCT__entry(
+               __string(       name,           dev_name(dev))
+               __field(        unsigned long,          ip      )
+               __field(        int,                    ret     )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, dev_name(dev));
+               __entry->ip = ip;
+               __entry->ret = ret;
+       ),
+
+       TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+               __entry->ret)
+);
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/sunrpc.h b/instrumentation/events/mainline/sunrpc.h
new file mode 100644 (file)
index 0000000..43be87d
--- /dev/null
@@ -0,0 +1,177 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunrpc
+
+#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNRPC_H
+
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rpc_task_status,
+
+       TP_PROTO(struct rpc_task *task),
+
+       TP_ARGS(task),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_task *, task)
+               __field(const struct rpc_clnt *, clnt)
+               __field(int, status)
+       ),
+
+       TP_fast_assign(
+               __entry->task = task;
+               __entry->clnt = task->tk_client;
+               __entry->status = task->tk_status;
+       ),
+
+       TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
+);
+
+DEFINE_EVENT(rpc_task_status, rpc_call_status,
+       TP_PROTO(struct rpc_task *task),
+
+       TP_ARGS(task)
+);
+
+DEFINE_EVENT(rpc_task_status, rpc_bind_status,
+       TP_PROTO(struct rpc_task *task),
+
+       TP_ARGS(task)
+);
+
+TRACE_EVENT(rpc_connect_status,
+       TP_PROTO(struct rpc_task *task, int status),
+
+       TP_ARGS(task, status),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_task *, task)
+               __field(const struct rpc_clnt *, clnt)
+               __field(int, status)
+       ),
+
+       TP_fast_assign(
+               __entry->task = task;
+               __entry->clnt = task->tk_client;
+               __entry->status = status;
+       ),
+
+       TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
+);
+
+DECLARE_EVENT_CLASS(rpc_task_running,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_clnt *, clnt)
+               __field(const struct rpc_task *, task)
+               __field(const void *, action)
+               __field(unsigned long, runstate)
+               __field(int, status)
+               __field(unsigned short, flags)
+               ),
+
+       TP_fast_assign(
+               __entry->clnt = clnt;
+               __entry->task = task;
+               __entry->action = action;
+               __entry->runstate = task->tk_runstate;
+               __entry->status = task->tk_status;
+               __entry->flags = task->tk_flags;
+               ),
+
+       TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d action=%pf",
+               __entry->task,
+               __entry->clnt,
+               __entry->flags,
+               __entry->runstate,
+               __entry->status,
+               __entry->action
+               )
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_begin,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action)
+
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action)
+
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_complete,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+       TP_ARGS(clnt, task, action)
+
+);
+
+DECLARE_EVENT_CLASS(rpc_task_queued,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+       TP_ARGS(clnt, task, q),
+
+       TP_STRUCT__entry(
+               __field(const struct rpc_clnt *, clnt)
+               __field(const struct rpc_task *, task)
+               __field(unsigned long, timeout)
+               __field(unsigned long, runstate)
+               __field(int, status)
+               __field(unsigned short, flags)
+               __string(q_name, rpc_qname(q))
+               ),
+
+       TP_fast_assign(
+               __entry->clnt = clnt;
+               __entry->task = task;
+               __entry->timeout = task->tk_timeout;
+               __entry->runstate = task->tk_runstate;
+               __entry->status = task->tk_status;
+               __entry->flags = task->tk_flags;
+               __assign_str(q_name, rpc_qname(q));
+               ),
+
+       TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+               __entry->task,
+               __entry->clnt,
+               __entry->flags,
+               __entry->runstate,
+               __entry->status,
+               __entry->timeout,
+               __get_str(q_name)
+               )
+);
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+       TP_ARGS(clnt, task, q)
+
+);
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
+
+       TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+       TP_ARGS(clnt, task, q)
+
+);
+
+#endif /* _TRACE_SUNRPC_H */
+
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/workqueue.h b/instrumentation/events/mainline/workqueue.h
new file mode 100644 (file)
index 0000000..f28d1b6
--- /dev/null
@@ -0,0 +1,121 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+
+DECLARE_EVENT_CLASS(workqueue_work,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+       ),
+
+       TP_printk("work struct %p", __entry->work)
+);
+
+/**
+ * workqueue_queue_work - called when a work gets queued
+ * @req_cpu:   the requested cpu
+ * @cwq:       pointer to struct cpu_workqueue_struct
+ * @work:      pointer to struct work_struct
+ *
+ * This event occurs when a work is queued immediately or once a
+ * delayed work is actually queued on a workqueue (ie: once the delay
+ * has been reached).
+ */
+TRACE_EVENT(workqueue_queue_work,
+
+       TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
+                struct work_struct *work),
+
+       TP_ARGS(req_cpu, cwq, work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+               __field( void *,        function)
+               __field( void *,        workqueue)
+               __field( unsigned int,  req_cpu )
+               __field( unsigned int,  cpu     )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+               __entry->function       = work->func;
+               __entry->workqueue      = cwq->wq;
+               __entry->req_cpu        = req_cpu;
+               __entry->cpu            = cwq->pool->gcwq->cpu;
+       ),
+
+       TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
+                 __entry->work, __entry->function, __entry->workqueue,
+                 __entry->req_cpu, __entry->cpu)
+);
+
+/**
+ * workqueue_activate_work - called when a work gets activated
+ * @work:      pointer to struct work_struct
+ *
+ * This event occurs when a queued work is put on the active queue,
+ * which happens immediately after queueing unless @max_active limit
+ * is reached.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work)
+);
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work:      pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+               __field( void *,        function)
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+               __entry->function       = work->func;
+       ),
+
+       TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+);
+
+/**
+ * workqueue_execute_end - called immediately after the workqueue callback
+ * @work:      pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+
+       TP_PROTO(struct work_struct *work),
+
+       TP_ARGS(work)
+);
+
+#endif /*  _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/instrumentation/events/mainline/writeback.h b/instrumentation/events/mainline/writeback.h
new file mode 100644 (file)
index 0000000..b453d92
--- /dev/null
@@ -0,0 +1,492 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM writeback
+
+#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WRITEBACK_H
+
+#include <linux/backing-dev.h>
+#include <linux/writeback.h>
+
+#define show_inode_state(state)                                        \
+       __print_flags(state, "|",                               \
+               {I_DIRTY_SYNC,          "I_DIRTY_SYNC"},        \
+               {I_DIRTY_DATASYNC,      "I_DIRTY_DATASYNC"},    \
+               {I_DIRTY_PAGES,         "I_DIRTY_PAGES"},       \
+               {I_NEW,                 "I_NEW"},               \
+               {I_WILL_FREE,           "I_WILL_FREE"},         \
+               {I_FREEING,             "I_FREEING"},           \
+               {I_CLEAR,               "I_CLEAR"},             \
+               {I_SYNC,                "I_SYNC"},              \
+               {I_REFERENCED,          "I_REFERENCED"}         \
+       )
+
+#define WB_WORK_REASON                                                 \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+
+struct wb_writeback_work;
+
+DECLARE_EVENT_CLASS(writeback_work_class,
+       TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
+       TP_ARGS(bdi, work),
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(long, nr_pages)
+               __field(dev_t, sb_dev)
+               __field(int, sync_mode)
+               __field(int, for_kupdate)
+               __field(int, range_cyclic)
+               __field(int, for_background)
+               __field(int, reason)
+       ),
+       TP_fast_assign(
+               struct device *dev = bdi->dev;
+               if (!dev)
+                       dev = default_backing_dev_info.dev;
+               strncpy(__entry->name, dev_name(dev), 32);
+               __entry->nr_pages = work->nr_pages;
+               __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
+               __entry->sync_mode = work->sync_mode;
+               __entry->for_kupdate = work->for_kupdate;
+               __entry->range_cyclic = work->range_cyclic;
+               __entry->for_background = work->for_background;
+               __entry->reason = work->reason;
+       ),
+       TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
+                 "kupdate=%d range_cyclic=%d background=%d reason=%s",
+                 __entry->name,
+                 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
+                 __entry->nr_pages,
+                 __entry->sync_mode,
+                 __entry->for_kupdate,
+                 __entry->range_cyclic,
+                 __entry->for_background,
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
+);
+#define DEFINE_WRITEBACK_WORK_EVENT(name) \
+DEFINE_EVENT(writeback_work_class, name, \
+       TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
+       TP_ARGS(bdi, work))
+DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
+
+TRACE_EVENT(writeback_pages_written,
+       TP_PROTO(long pages_written),
+       TP_ARGS(pages_written),
+       TP_STRUCT__entry(
+               __field(long,           pages)
+       ),
+       TP_fast_assign(
+               __entry->pages          = pages_written;
+       ),
+       TP_printk("%ld", __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(writeback_class,
+       TP_PROTO(struct backing_dev_info *bdi),
+       TP_ARGS(bdi),
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+       ),
+       TP_fast_assign(
+               strncpy(__entry->name, dev_name(bdi->dev), 32);
+       ),
+       TP_printk("bdi %s",
+                 __entry->name
+       )
+);
+#define DEFINE_WRITEBACK_EVENT(name) \
+DEFINE_EVENT(writeback_class, name, \
+       TP_PROTO(struct backing_dev_info *bdi), \
+       TP_ARGS(bdi))
+
+DEFINE_WRITEBACK_EVENT(writeback_nowork);
+DEFINE_WRITEBACK_EVENT(writeback_wake_background);
+DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
+DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
+DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
+DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
+DEFINE_WRITEBACK_EVENT(writeback_thread_start);
+DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
+
+DECLARE_EVENT_CLASS(wbc_class,
+       TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
+       TP_ARGS(wbc, bdi),
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(long, nr_to_write)
+               __field(long, pages_skipped)
+               __field(int, sync_mode)
+               __field(int, for_kupdate)
+               __field(int, for_background)
+               __field(int, for_reclaim)
+               __field(int, range_cyclic)
+               __field(long, range_start)
+               __field(long, range_end)
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->name, dev_name(bdi->dev), 32);
+               __entry->nr_to_write    = wbc->nr_to_write;
+               __entry->pages_skipped  = wbc->pages_skipped;
+               __entry->sync_mode      = wbc->sync_mode;
+               __entry->for_kupdate    = wbc->for_kupdate;
+               __entry->for_background = wbc->for_background;
+               __entry->for_reclaim    = wbc->for_reclaim;
+               __entry->range_cyclic   = wbc->range_cyclic;
+               __entry->range_start    = (long)wbc->range_start;
+               __entry->range_end      = (long)wbc->range_end;
+       ),
+
+       TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
+               "bgrd=%d reclm=%d cyclic=%d "
+               "start=0x%lx end=0x%lx",
+               __entry->name,
+               __entry->nr_to_write,
+               __entry->pages_skipped,
+               __entry->sync_mode,
+               __entry->for_kupdate,
+               __entry->for_background,
+               __entry->for_reclaim,
+               __entry->range_cyclic,
+               __entry->range_start,
+               __entry->range_end)
+)
+
+#define DEFINE_WBC_EVENT(name) \
+DEFINE_EVENT(wbc_class, name, \
+       TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
+       TP_ARGS(wbc, bdi))
+DEFINE_WBC_EVENT(wbc_writepage);
+
+TRACE_EVENT(writeback_queue_io,
+       TP_PROTO(struct bdi_writeback *wb,
+                struct wb_writeback_work *work,
+                int moved),
+       TP_ARGS(wb, work, moved),
+       TP_STRUCT__entry(
+               __array(char,           name, 32)
+               __field(unsigned long,  older)
+               __field(long,           age)
+               __field(int,            moved)
+               __field(int,            reason)
+       ),
+       TP_fast_assign(
+               unsigned long *older_than_this = work->older_than_this;
+               strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+               __entry->older  = older_than_this ?  *older_than_this : 0;
+               __entry->age    = older_than_this ?
+                                 (jiffies - *older_than_this) * 1000 / HZ : -1;
+               __entry->moved  = moved;
+               __entry->reason = work->reason;
+       ),
+       TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
+               __entry->name,
+               __entry->older, /* older_than_this in jiffies */
+               __entry->age,   /* older_than_this in relative milliseconds */
+               __entry->moved,
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
+);
+
+TRACE_EVENT(global_dirty_state,
+
+       TP_PROTO(unsigned long background_thresh,
+                unsigned long dirty_thresh
+       ),
+
+       TP_ARGS(background_thresh,
+               dirty_thresh
+       ),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,  nr_dirty)
+               __field(unsigned long,  nr_writeback)
+               __field(unsigned long,  nr_unstable)
+               __field(unsigned long,  background_thresh)
+               __field(unsigned long,  dirty_thresh)
+               __field(unsigned long,  dirty_limit)
+               __field(unsigned long,  nr_dirtied)
+               __field(unsigned long,  nr_written)
+       ),
+
+       TP_fast_assign(
+               __entry->nr_dirty       = global_page_state(NR_FILE_DIRTY);
+               __entry->nr_writeback   = global_page_state(NR_WRITEBACK);
+               __entry->nr_unstable    = global_page_state(NR_UNSTABLE_NFS);
+               __entry->nr_dirtied     = global_page_state(NR_DIRTIED);
+               __entry->nr_written     = global_page_state(NR_WRITTEN);
+               __entry->background_thresh = background_thresh;
+               __entry->dirty_thresh   = dirty_thresh;
+               __entry->dirty_limit = global_dirty_limit;
+       ),
+
+       TP_printk("dirty=%lu writeback=%lu unstable=%lu "
+                 "bg_thresh=%lu thresh=%lu limit=%lu "
+                 "dirtied=%lu written=%lu",
+                 __entry->nr_dirty,
+                 __entry->nr_writeback,
+                 __entry->nr_unstable,
+                 __entry->background_thresh,
+                 __entry->dirty_thresh,
+                 __entry->dirty_limit,
+                 __entry->nr_dirtied,
+                 __entry->nr_written
+       )
+);
+
+#define KBps(x)                        ((x) << (PAGE_SHIFT - 10))
+
+TRACE_EVENT(bdi_dirty_ratelimit,
+
+       TP_PROTO(struct backing_dev_info *bdi,
+                unsigned long dirty_rate,
+                unsigned long task_ratelimit),
+
+       TP_ARGS(bdi, dirty_rate, task_ratelimit),
+
+       TP_STRUCT__entry(
+               __array(char,           bdi, 32)
+               __field(unsigned long,  write_bw)
+               __field(unsigned long,  avg_write_bw)
+               __field(unsigned long,  dirty_rate)
+               __field(unsigned long,  dirty_ratelimit)
+               __field(unsigned long,  task_ratelimit)
+               __field(unsigned long,  balanced_dirty_ratelimit)
+       ),
+
+       TP_fast_assign(
+               strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+               __entry->write_bw       = KBps(bdi->write_bandwidth);
+               __entry->avg_write_bw   = KBps(bdi->avg_write_bandwidth);
+               __entry->dirty_rate     = KBps(dirty_rate);
+               __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+               __entry->task_ratelimit = KBps(task_ratelimit);
+               __entry->balanced_dirty_ratelimit =
+                                         KBps(bdi->balanced_dirty_ratelimit);
+       ),
+
+       TP_printk("bdi %s: "
+                 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+                 "dirty_ratelimit=%lu task_ratelimit=%lu "
+                 "balanced_dirty_ratelimit=%lu",
+                 __entry->bdi,
+                 __entry->write_bw,            /* write bandwidth */
+                 __entry->avg_write_bw,        /* avg write bandwidth */
+                 __entry->dirty_rate,          /* bdi dirty rate */
+                 __entry->dirty_ratelimit,     /* base ratelimit */
+                 __entry->task_ratelimit, /* ratelimit with position control */
+                 __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+       )
+);
+
+TRACE_EVENT(balance_dirty_pages,
+
+       TP_PROTO(struct backing_dev_info *bdi,
+                unsigned long thresh,
+                unsigned long bg_thresh,
+                unsigned long dirty,
+                unsigned long bdi_thresh,
+                unsigned long bdi_dirty,
+                unsigned long dirty_ratelimit,
+                unsigned long task_ratelimit,
+                unsigned long dirtied,
+                unsigned long period,
+                long pause,
+                unsigned long start_time),
+
+       TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+               dirty_ratelimit, task_ratelimit,
+               dirtied, period, pause, start_time),
+
+       TP_STRUCT__entry(
+               __array(         char,  bdi, 32)
+               __field(unsigned long,  limit)
+               __field(unsigned long,  setpoint)
+               __field(unsigned long,  dirty)
+               __field(unsigned long,  bdi_setpoint)
+               __field(unsigned long,  bdi_dirty)
+               __field(unsigned long,  dirty_ratelimit)
+               __field(unsigned long,  task_ratelimit)
+               __field(unsigned int,   dirtied)
+               __field(unsigned int,   dirtied_pause)
+               __field(unsigned long,  paused)
+               __field(         long,  pause)
+               __field(unsigned long,  period)
+               __field(         long,  think)
+       ),
+
+       TP_fast_assign(
+               unsigned long freerun = (thresh + bg_thresh) / 2;
+               strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+
+               __entry->limit          = global_dirty_limit;
+               __entry->setpoint       = (global_dirty_limit + freerun) / 2;
+               __entry->dirty          = dirty;
+               __entry->bdi_setpoint   = __entry->setpoint *
+                                               bdi_thresh / (thresh + 1);
+               __entry->bdi_dirty      = bdi_dirty;
+               __entry->dirty_ratelimit = KBps(dirty_ratelimit);
+               __entry->task_ratelimit = KBps(task_ratelimit);
+               __entry->dirtied        = dirtied;
+               __entry->dirtied_pause  = current->nr_dirtied_pause;
+               __entry->think          = current->dirty_paused_when == 0 ? 0 :
+                        (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
+               __entry->period         = period * 1000 / HZ;
+               __entry->pause          = pause * 1000 / HZ;
+               __entry->paused         = (jiffies - start_time) * 1000 / HZ;
+       ),
+
+
+       TP_printk("bdi %s: "
+                 "limit=%lu setpoint=%lu dirty=%lu "
+                 "bdi_setpoint=%lu bdi_dirty=%lu "
+                 "dirty_ratelimit=%lu task_ratelimit=%lu "
+                 "dirtied=%u dirtied_pause=%u "
+                 "paused=%lu pause=%ld period=%lu think=%ld",
+                 __entry->bdi,
+                 __entry->limit,
+                 __entry->setpoint,
+                 __entry->dirty,
+                 __entry->bdi_setpoint,
+                 __entry->bdi_dirty,
+                 __entry->dirty_ratelimit,
+                 __entry->task_ratelimit,
+                 __entry->dirtied,
+                 __entry->dirtied_pause,
+                 __entry->paused,      /* ms */
+                 __entry->pause,       /* ms */
+                 __entry->period,      /* ms */
+                 __entry->think        /* ms */
+         )
+);
+
+TRACE_EVENT(writeback_sb_inodes_requeue,
+
+       TP_PROTO(struct inode *inode),
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(unsigned long, state)
+               __field(unsigned long, dirtied_when)
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->name,
+                       dev_name(inode_to_bdi(inode)->dev), 32);
+               __entry->ino            = inode->i_ino;
+               __entry->state          = inode->i_state;
+               __entry->dirtied_when   = inode->dirtied_when;
+       ),
+
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+                 __entry->name,
+                 __entry->ino,
+                 show_inode_state(__entry->state),
+                 __entry->dirtied_when,
+                 (jiffies - __entry->dirtied_when) / HZ
+       )
+);
+
+DECLARE_EVENT_CLASS(writeback_congest_waited_template,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed),
+
+       TP_STRUCT__entry(
+               __field(        unsigned int,   usec_timeout    )
+               __field(        unsigned int,   usec_delayed    )
+       ),
+
+       TP_fast_assign(
+               __entry->usec_timeout   = usec_timeout;
+               __entry->usec_delayed   = usec_delayed;
+       ),
+
+       TP_printk("usec_timeout=%u usec_delayed=%u",
+                       __entry->usec_timeout,
+                       __entry->usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed)
+);
+
+DECLARE_EVENT_CLASS(writeback_single_inode_template,
+
+       TP_PROTO(struct inode *inode,
+                struct writeback_control *wbc,
+                unsigned long nr_to_write
+       ),
+
+       TP_ARGS(inode, wbc, nr_to_write),
+
+       TP_STRUCT__entry(
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(unsigned long, state)
+               __field(unsigned long, dirtied_when)
+               __field(unsigned long, writeback_index)
+               __field(long, nr_to_write)
+               __field(unsigned long, wrote)
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->name,
+                       dev_name(inode_to_bdi(inode)->dev), 32);
+               __entry->ino            = inode->i_ino;
+               __entry->state          = inode->i_state;
+               __entry->dirtied_when   = inode->dirtied_when;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
+               __entry->nr_to_write    = nr_to_write;
+               __entry->wrote          = nr_to_write - wbc->nr_to_write;
+       ),
+
+       TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
+                 "index=%lu to_write=%ld wrote=%lu",
+                 __entry->name,
+                 __entry->ino,
+                 show_inode_state(__entry->state),
+                 __entry->dirtied_when,
+                 (jiffies - __entry->dirtied_when) / HZ,
+                 __entry->writeback_index,
+                 __entry->nr_to_write,
+                 __entry->wrote
+       )
+);
+
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
+       TP_PROTO(struct inode *inode,
+                struct writeback_control *wbc,
+                unsigned long nr_to_write),
+       TP_ARGS(inode, wbc, nr_to_write)
+);
+
+#endif /* _TRACE_WRITEBACK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 779616db921bd330d133b31ecd84179d27994d7b..088cd5f107c6403a697e2b918fd68013ba4f82d0 100644 (file)
@@ -121,6 +121,79 @@ endif
 #obj-m += lttng-probe-lock.o
 #endif
 
+ifneq ($(CONFIG_BTRFS_FS),)
+btrfs_dep = $(srctree)/fs/btrfs/*.h
+btrfs = $(shell \
+       if [ $(VERSION) -ge 3 \
+               -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 39 \) ] ; then \
+               echo "lttng-probe-btrfs.o" ; fi;)
+ifneq ($(btrfs),)
+ifeq ($(wildcard $(btrfs_dep)),)
+$(warning Files $(btrfs_dep) not found. Probe "btrfs" is disabled. Use full kernel source tree to enable it.)
+btrfs =
+endif
+endif
+obj-m += $(btrfs)
+endif
+
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 \
+               -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
+               echo "lttng-probe-compaction.o" ; fi;)
+
+ifneq ($(CONFIG_EXT4_FS),)
+ext4_dep = $(srctree)/fs/ext4/*.h
+ext4 = lttng-probe-ext4.o
+ifeq ($(wildcard $(ext4_dep)),)
+$(warning Files $(ext4_dep) not found. Probe "ext4" is disabled. Use full kernel source tree to enable it.)
+ext4 =
+endif
+obj-m += $(ext4)
+endif
+
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 4 ] ; then \
+               echo "lttng-probe-printk.o" ; fi;)
+ifneq ($(CONFIG_FRAME_WARN),0)
+CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
+endif
+
+obj-m +=  $(shell \
+       if [ \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+               -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
+               -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
+               -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 0 -a $(SUBLEVEL) -ge 41 \) ] ; then \
+               echo "lttng-probe-random.o" ; fi;)
+
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 2 ] ; then \
+               echo "lttng-probe-rcu.o" ; fi;)
+
+ifneq ($(CONFIG_REGMAP),)
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 2 ] ; then \
+               echo "lttng-probe-regmap.o" ; fi;)
+endif
+
+ifneq ($(CONFIG_PM_RUNTIME),)
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 2 ] ; then \
+               echo "lttng-probe-rpm.o" ; fi;)
+endif
+
+ifneq ($(CONFIG_SUNRPC),)
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 4 ] ; then \
+               echo "lttng-probe-sunrpc.o" ; fi;)
+endif
+
+obj-m += lttng-probe-workqueue.o
+
+obj-m +=  $(shell \
+       if [ $(VERSION) -ge 3 \
+               -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 36 \) ] ; then \
+               echo "lttng-probe-writeback.o" ; fi;)
+
 
 ifneq ($(CONFIG_KPROBES),)
 obj-m += lttng-kprobes.o
diff --git a/probes/lttng-probe-btrfs.c b/probes/lttng-probe-btrfs.c
new file mode 100644 (file)
index 0000000..4b5f84e
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * probes/lttng-probe-btrfs.c
+ *
+ * LTTng btrfs probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+/*
+ * btrfs private headers: these relative includes only resolve against a
+ * full kernel source tree (distribution headers packages do not ship
+ * fs/btrfs/*.h). NOTE(review): presumably guarded in the Makefile by a
+ * $(wildcard) check like the ext4 probe -- verify the btrfs build rule.
+ */
+#include <../fs/btrfs/ctree.h>
+#include <../fs/btrfs/transaction.h>
+#include <../fs/btrfs/volumes.h>
+#include <linux/dcache.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/btrfs.h>
+
+/*
+ * Create LTTng tracepoint probes: with LTTNG_PACKAGE_BUILD and
+ * CREATE_TRACE_POINTS defined, including the lttng-module event header
+ * expands the TRACE_EVENT macros into probe registration code.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/btrfs.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng btrfs probes");
diff --git a/probes/lttng-probe-compaction.c b/probes/lttng-probe-compaction.c
new file mode 100644 (file)
index 0000000..13b9959
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * probes/lttng-probe-compaction.c
+ *
+ * LTTng compaction probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/compaction.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/compaction.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng compaction probes");
diff --git a/probes/lttng-probe-ext4.c b/probes/lttng-probe-ext4.c
new file mode 100644 (file)
index 0000000..19c9b68
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * probes/lttng-probe-ext4.c
+ *
+ * LTTng ext4 probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+/*
+ * ext4 private headers: these relative includes only resolve against a
+ * full kernel source tree, hence the $(wildcard $(srctree)/fs/ext4/*.h)
+ * guard in the Makefile that disables this probe otherwise.
+ */
+#include <../fs/ext4/ext4.h>
+#include <../fs/ext4/mballoc.h>
+#include <linux/dcache.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/ext4.h>
+
+/* Kernel version checks and tracepoint wrappers used by the event definitions. */
+#include "../lttng-kernel-version.h"
+#include "../wrapper/tracepoint.h"
+
+/*
+ * Create LTTng tracepoint probes: with LTTNG_PACKAGE_BUILD and
+ * CREATE_TRACE_POINTS defined, including the lttng-module event header
+ * expands the TRACE_EVENT macros into probe registration code.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/ext4.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng ext4 probes");
diff --git a/probes/lttng-probe-printk.c b/probes/lttng-probe-printk.c
new file mode 100644 (file)
index 0000000..1fe1f01
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * probes/lttng-probe-printk.c
+ *
+ * LTTng printk probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/printk.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/printk.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng printk probes");
diff --git a/probes/lttng-probe-random.c b/probes/lttng-probe-random.c
new file mode 100644 (file)
index 0000000..df9f551
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * probes/lttng-probe-random.c
+ *
+ * LTTng random probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/random.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/random.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng random probes");
diff --git a/probes/lttng-probe-rcu.c b/probes/lttng-probe-rcu.c
new file mode 100644 (file)
index 0000000..5216500
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * probes/lttng-probe-rcu.c
+ *
+ * LTTng rcu probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/rcu.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/rcu.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng rcu probes");
diff --git a/probes/lttng-probe-regmap.c b/probes/lttng-probe-regmap.c
new file mode 100644 (file)
index 0000000..7c1fd00
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * probes/lttng-probe-regmap.c
+ *
+ * LTTng regmap probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/regmap.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/regmap.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng regmap probes");
diff --git a/probes/lttng-probe-rpm.c b/probes/lttng-probe-rpm.c
new file mode 100644 (file)
index 0000000..607e9e5
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * probes/lttng-probe-rpm.c
+ *
+ * LTTng rpm probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/rpm.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/rpm.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng rpm probes");
diff --git a/probes/lttng-probe-sunrpc.c b/probes/lttng-probe-sunrpc.c
new file mode 100644 (file)
index 0000000..504413e
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * probes/lttng-probe-sunrpc.c
+ *
+ * LTTng sunrpc probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/sunrpc.h>
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/sunrpc.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng sunrpc probes");
diff --git a/probes/lttng-probe-workqueue.c b/probes/lttng-probe-workqueue.c
new file mode 100644 (file)
index 0000000..6f36e00
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * probes/lttng-probe-workqueue.c
+ *
+ * LTTng workqueue probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/idr.h>
+
+struct cpu_workqueue_struct;
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/workqueue.h>
+
+#include "../wrapper/tracepoint.h"
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/workqueue.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng workqueue probes");
diff --git a/probes/lttng-probe-writeback.c b/probes/lttng-probe-writeback.c
new file mode 100644 (file)
index 0000000..0a5c022
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * probes/lttng-probe-writeback.c
+ *
+ * LTTng writeback probes.
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Mentor Graphics Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+
+/*
+ * Create the tracepoint static inlines from the kernel to validate that our
+ * trace event macros match the kernel we run on.
+ */
+#include <trace/events/writeback.h>
+
+#include "../lttng-kernel-version.h"
+
+/* #if <check version number if global_dirty_limit will be exported> */
+#ifdef CONFIG_KALLSYMS
+#include "../wrapper/kallsyms.h"
+
+/*
+ * global_dirty_limit is not exported to modules: look its address up
+ * through kallsyms on first use and cache it. The cached pointer stays
+ * valid for the module's lifetime since the symbol is a kernel global.
+ *
+ * Static pointer is implicitly NULL; kernel style forbids explicit
+ * zero-initialization (and "= 0" for a pointer in particular).
+ */
+static unsigned long *wrapper_global_dirty_limit_sym;
+
+/*
+ * Return the kernel's current global dirty limit, or 0 (with a one-shot
+ * warning) when the symbol cannot be resolved. This wrapper is expanded
+ * on every writeback tracepoint hit via the macro below, so the failure
+ * warning must not be emitted on each call: use printk_once to avoid
+ * flooding the kernel log.
+ */
+static inline
+unsigned long wrapper_global_dirty_limit(void)
+{
+	if (!wrapper_global_dirty_limit_sym)
+		wrapper_global_dirty_limit_sym =
+			(void *)kallsyms_lookup_funcptr("global_dirty_limit");
+	if (wrapper_global_dirty_limit_sym)
+		return *wrapper_global_dirty_limit_sym;
+	printk_once(KERN_WARNING "LTTng: global_dirty_limit symbol lookup failed.\n");
+	return 0;
+}
+#define global_dirty_limit wrapper_global_dirty_limit()
+#endif
+/* #endif <check version number> */
+
+/*
+ * Create LTTng tracepoint probes.
+ */
+#define LTTNG_PACKAGE_BUILD
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
+
+#include "../instrumentation/events/lttng-module/writeback.h"
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
+MODULE_DESCRIPTION("LTTng writeback probes");
This page took 0.158184 seconds and 4 git commands to generate.