X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=instrumentation%2Fevents%2Flttng-module%2Fkmem.h;h=7607fc0c71b7f78aa6d6546d499e9c66ab83365d;hb=026e6902b22bf8e84b1b89059c23257ee5c916b0;hp=dab8989c0ea8327462ea2377991b873fc98680c8;hpb=d3ac4d63d21c643df5b09d9d7888eb0c4122379c;p=lttng-modules.git

diff --git a/instrumentation/events/lttng-module/kmem.h b/instrumentation/events/lttng-module/kmem.h
index dab8989c..7607fc0c 100644
--- a/instrumentation/events/lttng-module/kmem.h
+++ b/instrumentation/events/lttng-module/kmem.h
@@ -4,6 +4,13 @@
 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KMEM_H
 
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+#include <trace/events/gfpflags.h>
+#endif
+
 DECLARE_EVENT_CLASS(kmem_alloc,
 
 	TP_PROTO(unsigned long call_site,
@@ -15,8 +22,8 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
-		__field(	unsigned long,	call_site	)
-		__field(	const void *,	ptr		)
+		__field_hex(	unsigned long,	call_site	)
+		__field_hex(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
 		__field(	gfp_t,		gfp_flags	)
@@ -38,7 +45,9 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		show_gfp_flags(__entry->gfp_flags))
 )
 
-DEFINE_EVENT(kmem_alloc, kmalloc,
+DEFINE_EVENT_MAP(kmem_alloc, kmalloc,
+
+	kmem_kmalloc,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
@@ -66,8 +75,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
 
 	TP_STRUCT__entry(
-		__field(	unsigned long,	call_site	)
-		__field(	const void *,	ptr		)
+		__field_hex(	unsigned long,	call_site	)
+		__field_hex(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
 		__field(	gfp_t,		gfp_flags	)
@@ -92,7 +101,9 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->node)
 )
 
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+DEFINE_EVENT_MAP(kmem_alloc_node, kmalloc_node,
+
+	kmem_kmalloc_node,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
 		 size_t bytes_req, size_t bytes_alloc,
@@ -117,8 +128,8 @@ DECLARE_EVENT_CLASS(kmem_free,
 	TP_ARGS(call_site, ptr),
 
 	TP_STRUCT__entry(
-		__field(	unsigned long,	call_site	)
-		__field(	const void *,	ptr		)
+		__field_hex(	unsigned long,	call_site	)
+		__field_hex(	const void *,	ptr		)
 	),
 
 	TP_fast_assign(
@@ -129,7 +140,9 @@ DECLARE_EVENT_CLASS(kmem_free,
 	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
 )
 
-DEFINE_EVENT(kmem_free, kfree,
+DEFINE_EVENT_MAP(kmem_free, kfree,
+
+	kmem_kfree,
 
 	TP_PROTO(unsigned long call_site, const void *ptr),
 
@@ -143,14 +156,19 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,
 	TP_ARGS(call_site, ptr)
 )
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+TRACE_EVENT(mm_page_free,
+#else
 TRACE_EVENT(mm_page_free_direct,
+#endif
 
 	TP_PROTO(struct page *page, unsigned int order),
 
 	TP_ARGS(page, order),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field_hex(	struct page *,	page		)
 		__field(	unsigned int,	order		)
 	),
 
@@ -165,14 +183,18 @@ TRACE_EVENT(mm_page_free_direct,
 			__entry->order)
 )
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+TRACE_EVENT(mm_page_free_batched,
+#else
 TRACE_EVENT(mm_pagevec_free,
+#endif
 
 	TP_PROTO(struct page *page, int cold),
 
 	TP_ARGS(page, cold),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field_hex(	struct page *,	page		)
 		__field(	int,		cold		)
 	),
 
@@ -195,7 +217,7 @@ TRACE_EVENT(mm_page_alloc,
 	TP_ARGS(page, order, gfp_flags, migratetype),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field_hex(	struct page *,	page		)
 		__field(	unsigned int,	order		)
 		__field(	gfp_t,		gfp_flags	)
 		__field(	int,		migratetype	)
@@ -210,7 +232,7 @@ TRACE_EVENT(mm_page_alloc,
 
 	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
 		__entry->page,
-		page_to_pfn(__entry->page),
+		__entry->page ? page_to_pfn(__entry->page) : 0,
 		__entry->order,
 		__entry->migratetype,
 		show_gfp_flags(__entry->gfp_flags))
@@ -223,7 +245,7 @@ DECLARE_EVENT_CLASS(mm_page,
 	TP_ARGS(page, order, migratetype),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page		)
+		__field_hex(	struct page *,	page		)
 		__field(	unsigned int,	order		)
 		__field(	int,		migratetype	)
 	),
@@ -236,7 +258,7 @@ DECLARE_EVENT_CLASS(mm_page,
 
 	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
 		__entry->page,
-		page_to_pfn(__entry->page),
+		__entry->page ? page_to_pfn(__entry->page) : 0,
 		__entry->order,
 		__entry->migratetype,
 		__entry->order == 0)
@@ -251,7 +273,11 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
 
 DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+#else
+	TP_PROTO(struct page *page, int order, int migratetype),
+#endif
 
 	TP_ARGS(page, order, migratetype),
 
@@ -271,7 +297,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		alloc_migratetype, fallback_migratetype),
 
 	TP_STRUCT__entry(
-		__field(	struct page *,	page			)
+		__field_hex(	struct page *,	page			)
 		__field(	int,		alloc_order		)
 		__field(	int,		fallback_order		)
 		__field(	int,		alloc_migratetype	)
@@ -297,6 +323,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		__entry->fallback_order < pageblock_order,
 		__entry->alloc_migratetype == __entry->fallback_migratetype)
 )
+#endif
 
 #endif /* _TRACE_KMEM_H */
 
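
Note, not part of the patch above: a minimal sketch of the NULL-page guard the diff adds to the mm_page_alloc and mm_page TP_printk() expressions. mm_page_alloc is also traced when an allocation fails, so __entry->page can be NULL, and with some memory models page_to_pfn() dereferences the struct page itself; the ternary falls back to pfn 0 instead. The helper name lttng_safe_page_to_pfn is hypothetical, used only for illustration.

/* Sketch only: the same guard as the
 * "__entry->page ? page_to_pfn(__entry->page) : 0" expression in the diff,
 * wrapped in a hypothetical helper. */
#include <linux/mm.h>	/* page_to_pfn(), struct page */

static inline unsigned long lttng_safe_page_to_pfn(struct page *page)
{
	/* A failed allocation traces page == NULL; report pfn 0 for it. */
	return page ? page_to_pfn(page) : 0;
}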
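
Also not part of the patch: the kernel-version guard idiom the diff relies on (it needs <linux/version.h>), shown in isolation. Upstream renamed the mm_page_free_direct tracepoint to mm_page_free and mm_pagevec_free to mm_page_free_batched in 3.3, so the header picks the name at compile time. The macro LTTNG_MM_PAGE_FREE_NAME below is made up for this example; the patch simply open-codes the #if around TRACE_EVENT().

#include <linux/version.h>

/* Illustrative only: resolve the tracepoint name for the kernel being built. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define LTTNG_MM_PAGE_FREE_NAME	mm_page_free		/* 3.3 and later */
#else
#define LTTNG_MM_PAGE_FREE_NAME	mm_page_free_direct	/* before 3.3 */
#endif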