+#include <linux/percpu.h>
+#include <linux/version.h>
+#include <asm/local.h>
+#include <lttng-kernel-version.h>
+#include <lttng-clock.h>
+#include <wrapper/percpu-defs.h>
+#include <wrapper/random.h>
+
+#if ((LTTNG_KERNEL_RANGE(3,10,0, 3,10,14) && !LTTNG_RHEL_KERNEL_RANGE(3,10,0,123,0,0, 3,10,14,0,0,0)) \
+ || LTTNG_KERNEL_RANGE(3,11,0, 3,11,3))
+#error "Linux kernels 3.10 and 3.11 introduce a deadlock in the timekeeping subsystem. Fixed by commit 7bd36014460f793c19e7d6c94dab67b0afcfcb7f \"timekeeping: Fix HRTICK related deadlock from ntp lock changes\" in Linux."
+#endif
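+
+/*
+ * Note: LTTNG_KERNEL_RANGE(a,b,c, d,e,f), from lttng-kernel-version.h,
+ * matches kernels in the half-open range [a.b.c, d.e.f): the upper
+ * bound itself is excluded from the blacklist.
+ */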
+
+extern struct lttng_trace_clock *lttng_trace_clock;
+
+/*
+ * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
+ * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
+ * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
+ && !LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
+ && !LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
+ && !LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
+ && !LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
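+
+/*
+ * On the kernels selected above, ktime_get_mono_fast_ns() (introduced
+ * in Linux 3.17) is available and not affected by the regression
+ * described in the comment above, so the NMI-safe monotonic fast clock
+ * can be used.
+ */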
+
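+/*
+ * Per-CPU snapshot of the last timestamp (truncated to an unsigned
+ * long) returned on this CPU. It lets the wrapper below detect the
+ * fast clock moving slightly backwards and, on 32-bit, rebuild the
+ * full 64-bit time across a 32-bit overflow (see trace_clock_fixup).
+ */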
+DECLARE_PER_CPU(local_t, lttng_last_tsc);
+
+#if (BITS_PER_LONG == 32)
+/*
+ * Fixup "src_now" using the 32 LSB from "last". We need to handle overflow and
+ * underflow of the 32nd bit. "last" can be above, below or equal to the 32 LSB
+ * of "src_now".
+ */
+static inline u64 trace_clock_fixup(u64 src_now, u32 last)
+{
+ u64 now;
+
+ now = src_now & 0xFFFFFFFF00000000ULL;
+ now |= (u64) last;
+ /* Detect overflow or underflow between the 32 LSB of "src_now" and "last". */
+ if ((src_now & 0x80000000U) && !(last & 0x80000000U)) {
+ /*
+ * If the 32nd bit transitions from 1 to 0, and we move forward in
+ * time from "src_now" to "last", then we have an overflow.
+ */
+ if (((s32) src_now - (s32) last) < 0)
+ now += 0x0000000100000000ULL;
+ } else if (!(src_now & 0x80000000U) && (last & 0x80000000U)) {
+ /*
+ * If the 32nd bit transitions from 0 to 1, and we move backward in
+ * time from "src_now" to "last", then we have an underflow.
+ */
+ if (((s32) src_now - (s32) last) > 0)
+ now -= 0x0000000100000000ULL;
+ }
+ return now;
+}
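+
+/*
+ * Note that the inner comparisons keep a small backward step across
+ * the 32-bit boundary (e.g. src_now LSB 0x80000010 vs. last
+ * 0x7FFFFFF0) from being mistaken for a full 2^32 wraparound.
+ */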
+#else /* #if (BITS_PER_LONG == 32) */
+/*
+ * The fixup is pretty easy on 64-bit architectures: "last" is a 64-bit
+ * value, so we can use "last" directly as the current time.
+ */
+static inline u64 trace_clock_fixup(u64 src_now, u64 last)
+{
+ return last;
+}
+#endif /* #else #if (BITS_PER_LONG == 32) */
+
+/*
+ * Sometimes called with preemption enabled. Can be interrupted.
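+ * The per-CPU "lttng_last_tsc" value is used to detect the fast clock
+ * moving slightly backwards, in which case trace_clock_fixup() is used
+ * to keep the returned time consistent with the last value read.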
+ */
+static inline u64 trace_clock_monotonic_wrapper(void)
+{
+ u64 now;
+ unsigned long last, result;
+ local_t *last_tsc;