From: Michael Jeanson
Date: Mon, 12 Feb 2018 17:32:12 +0000 (+0100)
Subject: Fix: update vmscan instrumentation for v4.16
X-Git-Tag: v2.9.9~20
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=c5b8e9ad696519bed135b0c9dfcf31730097f873;p=lttng-modules.git

Fix: update vmscan instrumentation for v4.16

See upstream commit:

  commit 9092c71bb724dba2ecba849eae69e5c9d39bd3d2
  Author: Josef Bacik
  Date:   Wed Jan 31 16:16:26 2018 -0800

    mm: use sc->priority for slab shrink targets

    Previously we were using the ratio of the number of lru pages scanned to
    the number of eligible lru pages to determine the number of slab objects
    to scan.  The problem with this is that these two things have nothing to
    do with each other, so in slab heavy work loads where there is little to
    no page cache we can end up with the pages scanned being a very low
    number.  This means that we reclaim next to no slab pages and waste a
    lot of time reclaiming small amounts of space.

Signed-off-by: Michael Jeanson
Signed-off-by: Mathieu Desnoyers
---

diff --git a/instrumentation/events/lttng-module/mm_vmscan.h b/instrumentation/events/lttng-module/mm_vmscan.h
index 9845635b..a69b42fe 100644
--- a/instrumentation/events/lttng-module/mm_vmscan.h
+++ b/instrumentation/events/lttng-module/mm_vmscan.h
@@ -204,7 +204,32 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(mm_vmscan_direct_reclaim_end_template, mm_vmscan
 	TP_ARGS(nr_reclaimed)
 )
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
+LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_start,
+
+	mm_vmscan_shrink_slab_start,
+
+	TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
+		long nr_objects_to_shrink, unsigned long cache_items,
+		unsigned long long delta, unsigned long total_scan,
+		int priority),
+
+	TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan,
+		priority),
+
+	TP_FIELDS(
+		ctf_integer_hex(struct shrinker *, shr, shr)
+		ctf_integer_hex(void *, shrink, shr->scan_objects)
+		ctf_integer(int, nid, sc->nid)
+		ctf_integer(long, nr_objects_to_shrink, nr_objects_to_shrink)
+		ctf_integer(gfp_t, gfp_flags, sc->gfp_mask)
+		ctf_integer(unsigned long, cache_items, cache_items)
+		ctf_integer(unsigned long long, delta, delta)
+		ctf_integer(unsigned long, total_scan, total_scan)
+		ctf_integer(int, priority, priority)
+	)
+)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
 LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_start,
 
 	mm_vmscan_shrink_slab_start,
@@ -233,6 +258,7 @@ LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_start,
 		ctf_integer(unsigned long, total_scan, total_scan)
 	)
 )
+#endif
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0))
 LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_end,
@@ -255,7 +281,7 @@ LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_end,
 		ctf_integer(long, total_scan, total_scan)
 	)
 )
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
 LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_end,
 
 	mm_vmscan_shrink_slab_end,
@@ -278,7 +304,6 @@ LTTNG_TRACEPOINT_EVENT_MAP(mm_shrink_slab_end,
 		ctf_integer(long, total_scan, new_scan_cnt - unused_scan_cnt)
 	)
 )
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
 #endif
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
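
Editor's note: the sketch below illustrates what a callback matching the new
v4.16+ TP_PROTO added by this patch would look like (shr, sc,
nr_objects_to_shrink, cache_items, delta, total_scan, priority). It is not
part of this patch or of lttng-modules; the function name and the pr_debug()
output are hypothetical, shown only to make the new argument list concrete.

  #include <linux/shrinker.h>
  #include <linux/printk.h>

  /*
   * Illustrative sketch only: matches the prototype of the
   * mm_vmscan_shrink_slab_start tracepoint on kernels >= 4.16,
   * where the 'priority' argument replaces the old lru-scan ratio.
   */
  static void probe_mm_shrink_slab_start(struct shrinker *shr,
  		struct shrink_control *sc, long nr_objects_to_shrink,
  		unsigned long cache_items, unsigned long long delta,
  		unsigned long total_scan, int priority)
  {
  	/* sc->nid is among the fields the event above records. */
  	pr_debug("shrink_slab_start: shr=%p nid=%d to_shrink=%ld items=%lu delta=%llu total_scan=%lu priority=%d\n",
  		 shr, sc->nid, nr_objects_to_shrink, cache_items,
  		 delta, total_scan, priority);
  }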