)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-TRACE_EVENT(find_free_extent,
+TRACE_EVENT_MAP(find_free_extent,
+
+ btrfs_find_free_extent,
TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
u64 data),
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-TRACE_EVENT(alloc_extent_state,
+TRACE_EVENT_MAP(alloc_extent_state,
+
+ btrfs_alloc_extent_state,
TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
show_gfp_flags(__entry->mask), (void *)__entry->ip)
)
-TRACE_EVENT(free_extent_state,
+TRACE_EVENT_MAP(free_extent_state,
+
+ btrfs_free_extent_state,
TP_PROTO(struct extent_state *state, unsigned long IP),
)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
-TRACE_EVENT(journal_write_superblock,
+TRACE_EVENT_MAP(journal_write_superblock,
+
+ jbd_journal_write_superblock,
+
TP_PROTO(journal_t *journal, int write_op),
TP_ARGS(journal, write_op),
show_gfp_flags(__entry->gfp_flags))
)
-DEFINE_EVENT(kmem_alloc, kmalloc,
+DEFINE_EVENT_MAP(kmem_alloc, kmalloc,
+
+ kmem_kmalloc,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
__entry->node)
)
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+DEFINE_EVENT_MAP(kmem_alloc_node, kmalloc_node,
+
+ kmem_kmalloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc,
TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
)
-DEFINE_EVENT(kmem_free, kfree,
+DEFINE_EVENT_MAP(kmem_free, kfree,
+
+ kmem_kfree,
TP_PROTO(unsigned long call_site, const void *ptr),
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
-DECLARE_EVENT_CLASS(cpu,
+DECLARE_EVENT_CLASS(power_cpu,
TP_PROTO(unsigned int state, unsigned int cpu_id),
(unsigned long)__entry->cpu_id)
)
-DEFINE_EVENT(cpu, cpu_idle,
+DEFINE_EVENT_MAP(power_cpu, cpu_idle,
+
+ power_cpu_idle,
TP_PROTO(unsigned int state, unsigned int cpu_id),
#define PWR_EVENT_EXIT -1
#endif
-DEFINE_EVENT(cpu, cpu_frequency,
+DEFINE_EVENT_MAP(power_cpu, cpu_frequency,
+
+ power_cpu_frequency,
TP_PROTO(unsigned int frequency, unsigned int cpu_id),
TP_ARGS(frequency, cpu_id)
)
-TRACE_EVENT(machine_suspend,
+TRACE_EVENT_MAP(machine_suspend,
+
+ power_machine_suspend,
TP_PROTO(unsigned int state),
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
-DECLARE_EVENT_CLASS(wakeup_source,
+DECLARE_EVENT_CLASS(power_wakeup_source,
TP_PROTO(const char *name, unsigned int state),
(unsigned long)__entry->state)
)
-DEFINE_EVENT(wakeup_source, wakeup_source_activate,
+DEFINE_EVENT_MAP(power_wakeup_source, wakeup_source_activate,
+
+ power_wakeup_source_activate,
TP_PROTO(const char *name, unsigned int state),
TP_ARGS(name, state)
)
-DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
+DEFINE_EVENT_MAP(power_wakeup_source, wakeup_source_deactivate,
+
+ power_wakeup_source_deactivate,
TP_PROTO(const char *name, unsigned int state),
* The clock events are used for clock enable/disable and for
* clock rate change
*/
-DECLARE_EVENT_CLASS(clock,
+DECLARE_EVENT_CLASS(power_clock,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
(unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
)
-DEFINE_EVENT(clock, clock_enable,
+DEFINE_EVENT_MAP(power_clock, clock_enable,
+
+ power_clock_enable,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id)
)
-DEFINE_EVENT(clock, clock_disable,
+DEFINE_EVENT_MAP(power_clock, clock_disable,
+
+ power_clock_disable,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id)
)
-DEFINE_EVENT(clock, clock_set_rate,
+DEFINE_EVENT_MAP(power_clock, clock_set_rate,
+
+ power_clock_set_rate,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
__get_str(pool_name), __entry->bytes, (void *)__entry->IP)
)
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+DEFINE_EVENT_MAP(random__mix_pool_bytes, mix_pool_bytes,
+
+ random_mix_pool_bytes,
+
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP)
)
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+DEFINE_EVENT_MAP(random__mix_pool_bytes, mix_pool_bytes_nolock,
+
+ random_mix_pool_bytes_nolock,
+
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP)
)
-TRACE_EVENT(credit_entropy_bits,
+TRACE_EVENT_MAP(credit_entropy_bits,
+
+ random_credit_entropy_bits,
+
TP_PROTO(const char *pool_name, int bits, int entropy_count,
int entropy_total, unsigned long IP),
(void *)__entry->IP)
)
-TRACE_EVENT(get_random_bytes,
+TRACE_EVENT_MAP(get_random_bytes,
+
+ random_get_random_bytes,
+
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP),
)
-DEFINE_EVENT(random__extract_entropy, extract_entropy,
+DEFINE_EVENT_MAP(random__extract_entropy, extract_entropy,
+
+ random_extract_entropy,
+
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP)
)
-DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+DEFINE_EVENT_MAP(random__extract_entropy, extract_entropy_user,
+
+ random_extract_entropy_user,
+
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
/*
* Tracepoint for free an sk_buff:
*/
-TRACE_EVENT(kfree_skb,
+TRACE_EVENT_MAP(kfree_skb,
+
+ skb_kfree,
TP_PROTO(struct sk_buff *skb, void *location),
)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
-TRACE_EVENT(consume_skb,
+TRACE_EVENT_MAP(consume_skb,
+
+ skb_consume,
TP_PROTO(struct sk_buff *skb),
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
-TRACE_EVENT(replace_swap_token,
+TRACE_EVENT_MAP(replace_swap_token,
+
+ mm_vmscan_replace_swap_token,
+
TP_PROTO(struct mm_struct *old_mm,
struct mm_struct *new_mm),
__entry->new_mm, __entry->new_prio)
)
-DECLARE_EVENT_CLASS(put_swap_token_template,
+DECLARE_EVENT_CLASS(mm_vmscan_put_swap_token_template,
TP_PROTO(struct mm_struct *swap_token_mm),
TP_ARGS(swap_token_mm),
TP_printk("token_mm=%p", __entry->swap_token_mm)
)
-DEFINE_EVENT(put_swap_token_template, put_swap_token,
+/* Class was renamed to mm_vmscan_put_swap_token_template above; reference the new name. */
+DEFINE_EVENT_MAP(mm_vmscan_put_swap_token_template, put_swap_token,
+
+	mm_vmscan_put_swap_token,
+
	TP_PROTO(struct mm_struct *swap_token_mm),
	TP_ARGS(swap_token_mm)
)
-DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
+/* Class was renamed to mm_vmscan_put_swap_token_template above; reference the new name. */
+DEFINE_EVENT_CONDITION_MAP(mm_vmscan_put_swap_token_template, disable_swap_token,
+
+	mm_vmscan_disable_swap_token,
+
	TP_PROTO(struct mm_struct *swap_token_mm),
	TP_ARGS(swap_token_mm),
	TP_CONDITION(swap_token_mm != NULL)
)
-TRACE_EVENT_CONDITION(update_swap_token_priority,
+TRACE_EVENT_CONDITION_MAP(update_swap_token_priority,
+
+ mm_vmscan_update_swap_token_priority,
+
TP_PROTO(struct mm_struct *mm,
unsigned int old_prio,
struct mm_struct *swap_token_mm),
TP_PROTO(struct backing_dev_info *bdi), \
TP_ARGS(bdi))
+/*
+ * Like DEFINE_WRITEBACK_EVENT, but emits the event under the remapped
+ * name 'map' (per the *_MAP macro convention used throughout this patch,
+ * which prefixes event names with their subsystem — here "writeback_").
+ */
+#define DEFINE_WRITEBACK_EVENT_MAP(name, map) \
+DEFINE_EVENT_MAP(writeback_class, name, map, \
+	TP_PROTO(struct backing_dev_info *bdi), \
+	TP_ARGS(bdi))
+
DEFINE_WRITEBACK_EVENT(writeback_nowork)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
DEFINE_WRITEBACK_EVENT(writeback_wake_background)
DEFINE_WRITEBACK_EVENT(writeback_thread_start)
DEFINE_WRITEBACK_EVENT(writeback_thread_stop)
#if (LTTNG_KERNEL_RANGE(3,1,0, 3,2,0))
-DEFINE_WRITEBACK_EVENT(balance_dirty_start)
-DEFINE_WRITEBACK_EVENT(balance_dirty_wait)
+DEFINE_WRITEBACK_EVENT_MAP(balance_dirty_start, writeback_balance_dirty_start)
+DEFINE_WRITEBACK_EVENT_MAP(balance_dirty_wait, writeback_balance_dirty_wait)
+
-TRACE_EVENT(balance_dirty_written,
+TRACE_EVENT_MAP(balance_dirty_written,
+
+	writeback_balance_dirty_written,
TP_PROTO(struct backing_dev_info *bdi, int written),
)
#endif
-DECLARE_EVENT_CLASS(wbc_class,
+DECLARE_EVENT_CLASS(writeback_wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
TP_ARGS(wbc, bdi),
TP_STRUCT__entry(
__entry->range_end)
)
-#define DEFINE_WBC_EVENT(name) \
-DEFINE_EVENT(wbc_class, name, \
+/*
+ * Redefine DEFINE_WBC_EVENT to take an explicit remapped event name,
+ * and attach events to the renamed writeback_wbc_class class.
+ */
+#undef DEFINE_WBC_EVENT
+#define DEFINE_WBC_EVENT(name, map) \
+DEFINE_EVENT_MAP(writeback_wbc_class, name, map, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
-DEFINE_WBC_EVENT(wbc_writeback_start)
-DEFINE_WBC_EVENT(wbc_writeback_written)
-DEFINE_WBC_EVENT(wbc_writeback_wait)
-DEFINE_WBC_EVENT(wbc_balance_dirty_start)
-DEFINE_WBC_EVENT(wbc_balance_dirty_written)
-DEFINE_WBC_EVENT(wbc_balance_dirty_wait)
+DEFINE_WBC_EVENT(wbc_writeback_start, writeback_wbc_writeback_start)
+DEFINE_WBC_EVENT(wbc_writeback_written, writeback_wbc_writeback_written)
+DEFINE_WBC_EVENT(wbc_writeback_wait, writeback_wbc_writeback_wait)
+DEFINE_WBC_EVENT(wbc_balance_dirty_start, writeback_wbc_balance_dirty_start)
+DEFINE_WBC_EVENT(wbc_balance_dirty_written, writeback_wbc_balance_dirty_written)
+DEFINE_WBC_EVENT(wbc_balance_dirty_wait, writeback_wbc_balance_dirty_wait)
#endif
-DEFINE_WBC_EVENT(wbc_writepage)
+DEFINE_WBC_EVENT(wbc_writepage, writeback_wbc_writepage)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
TRACE_EVENT(writeback_queue_io,
#endif
)
-TRACE_EVENT(global_dirty_state,
+TRACE_EVENT_MAP(global_dirty_state,
+
+ writeback_global_dirty_state,
TP_PROTO(unsigned long background_thresh,
unsigned long dirty_thresh
#define KBps(x) ((x) << (PAGE_SHIFT - 10))
-TRACE_EVENT(bdi_dirty_ratelimit,
+TRACE_EVENT_MAP(bdi_dirty_ratelimit,
+
+ writeback_bdi_dirty_ratelimit,
TP_PROTO(struct backing_dev_info *bdi,
unsigned long dirty_rate,
)
)
-TRACE_EVENT(balance_dirty_pages,
+TRACE_EVENT_MAP(balance_dirty_pages,
+
+ writeback_balance_dirty_pages,
TP_PROTO(struct backing_dev_info *bdi,
unsigned long thresh,