DEFINE_PER_CPU(local_t, lttng_last_tsc);
EXPORT_PER_CPU_SYMBOL(lttng_last_tsc);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+
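+/*
+ * LTTNG_CLOCK_NMI_SAFE_BROKEN is defined for kernel version ranges that ship a
+ * broken ktime_get_mono_fast_ns(). Emit a build-time warning so users know the
+ * non-nmi-safe clock source is used and events traced from NMI context are
+ * discarded.
+ */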
+#ifdef LTTNG_CLOCK_NMI_SAFE_BROKEN
+#warning "Your kernel implements a bogus nmi-safe clock source. Falling back to the non-nmi-safe clock source, which discards events traced from NMI context. Upgrade your kernel to resolve this situation."
+#endif
* CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
* This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
*/
+#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,1) \
+ || LTTNG_KERNEL_RANGE(4,7,4, 4,7,7) \
+ || LTTNG_KERNEL_RANGE(4,4,20, 4,4,24) \
+ || LTTNG_KERNEL_RANGE(4,1,32, 4,1,34))
+#define LTTNG_CLOCK_NMI_SAFE_BROKEN
+#endif
+
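+/*
+ * Select the nmi-safe clock source only on kernels with a working
+ * ktime_get_mono_fast_ns(): Linux 3.17 or newer, excluding the broken
+ * ranges flagged above.
+ */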
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
- && !LTTNG_KERNEL_RANGE(4,8,0, 4,8,1) \
- && !LTTNG_KERNEL_RANGE(4,7,4, 4,7,7) \
- && !LTTNG_KERNEL_RANGE(4,4,20, 4,4,24) \
- && !LTTNG_KERNEL_RANGE(4,1,32, 4,1,34))
+ && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
DECLARE_PER_CPU(local_t, lttng_last_tsc);