--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/atomic.h
+ *
+ * wrapper around linux/atomic.h.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_ATOMIC_H
+#define _LTTNG_WRAPPER_ATOMIC_H
+
+#include <linux/version.h>
+#include <linux/atomic.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0))
+static inline void lttng_smp_mb__before_atomic(void)
+{
+ smp_mb__before_atomic();
+}
+
+static inline void lttng_smp_mb__after_atomic(void)
+{
+ smp_mb__after_atomic();
+}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
+static inline void lttng_smp_mb__before_atomic(void)
+{
+ smp_mb__before_atomic_inc();
+}
+
+static inline void lttng_smp_mb__after_atomic(void)
+{
+ smp_mb__after_atomic_inc();
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
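+
+/*
+ * Illustrative usage sketch (the atomic counter "v" is hypothetical):
+ * pair the wrappers with an atomic read-modify-write operation, the same
+ * way the kernel pairs smp_mb__before_atomic()/smp_mb__after_atomic():
+ *
+ *	lttng_smp_mb__before_atomic();
+ *	atomic_inc(&v);
+ *	lttng_smp_mb__after_atomic();
+ */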
+
+#endif /* _LTTNG_WRAPPER_ATOMIC_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/compiler.h
+ *
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_COMPILER_H
+#define _LTTNG_WRAPPER_COMPILER_H
+
+#include <linux/compiler.h>
+
+/*
+ * Don't allow compiling with a buggy compiler.
+ */
+
+#ifdef GCC_VERSION
+
+/*
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
+ */
+# ifdef __ARMEL__
+# if GCC_VERSION >= 40800 && GCC_VERSION <= 40802
+# error Your gcc version produces clobbered frame accesses
+# endif
+# endif
+#endif
+
+/*
+ * READ_ONCE() and WRITE_ONCE() were introduced in kernel 3.19 and
+ * ACCESS_ONCE() was removed in 4.15. Prefer READ_ONCE()/WRITE_ONCE(),
+ * but fall back to ACCESS_ONCE() when they are not available.
+ */
+#ifndef READ_ONCE
+# define READ_ONCE(x) ACCESS_ONCE(x)
+#endif
+
+#ifndef WRITE_ONCE
+# define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = val; })
+#endif
+
+#define __LTTNG_COMPOUND_LITERAL(type, ...) (type[]) { __VA_ARGS__ }
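+
+/*
+ * Illustrative usage sketch: __LTTNG_COMPOUND_LITERAL() expands to an
+ * anonymous array compound literal, so
+ *
+ *	int *primes = __LTTNG_COMPOUND_LITERAL(int, 2, 3, 5);
+ *
+ * is equivalent to int *primes = (int[]) { 2, 3, 5 };
+ */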
+
+#endif /* _LTTNG_WRAPPER_COMPILER_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/fdtable.h
+ *
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_FDTABLE_H
+#define _LTTNG_WRAPPER_FDTABLE_H
+
+#include <linux/version.h>
+#include <linux/fdtable.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
+
+int lttng_iterate_fd(struct files_struct *files,
+ unsigned int first,
+ int (*cb)(const void *, struct file *, unsigned int),
+ const void *ctx);
+
+#else
+
+/*
+ * iterate_fd() appeared at commit
+ * c3c073f808b22dfae15ef8412b6f7b998644139a in the Linux kernel (first
+ * released kernel: v3.7).
+ */
+#define lttng_iterate_fd iterate_fd
+
+#endif
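+
+/*
+ * Illustrative usage sketch (the "check_fd" callback is hypothetical):
+ * the callback is invoked once per open file, and iteration stops at the
+ * first non-zero return value.
+ *
+ *	static int check_fd(const void *ctx, struct file *file, unsigned int fd)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	(void) lttng_iterate_fd(current->files, 0, check_fd, NULL);
+ */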
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+
+static inline bool lttng_close_on_exec(int fd, const struct fdtable *fdt)
+{
+ return close_on_exec(fd, fdt);
+}
+
+#else
+
+static inline bool lttng_close_on_exec(int fd, const struct fdtable *fdt)
+{
+ return FD_ISSET(fd, fdt->close_on_exec);
+}
+
+#endif
+
+#endif /* _LTTNG_WRAPPER_FDTABLE_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/file.h
+ *
+ * wrapper around linux/file.h.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_FILE_H
+#define _LTTNG_WRAPPER_FILE_H
+
+#include <linux/version.h>
+#include <linux/file.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
+
+static
+inline int lttng_get_unused_fd(void)
+{
+ return get_unused_fd_flags(0);
+}
+
+#define lttng_f_dentry f_path.dentry
+
+#else /* #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
+
+static
+inline int lttng_get_unused_fd(void)
+{
+ return get_unused_fd();
+}
+
+#define lttng_f_dentry f_dentry
+
+#endif /* #else #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
+
+#endif /* _LTTNG_WRAPPER_FILE_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/frame.h
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_FRAME_H
+#define _LTTNG_WRAPPER_FRAME_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
+
+#include <linux/frame.h>
+
+#define LTTNG_STACK_FRAME_NON_STANDARD(func) \
+ STACK_FRAME_NON_STANDARD(func)
+
+#else
+
+#define LTTNG_STACK_FRAME_NON_STANDARD(func)
+
+#endif
+
+#endif /* _LTTNG_WRAPPER_FRAME_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/genhd.h
+ *
+ * wrapper around block layer functions and data structures. Using
+ * KALLSYMS to get their addresses when available, else we need a
+ * kernel that exports these symbols to GPL modules.
+ *
+ * Copyright (C) 2011-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_GENHD_H
+#define _LTTNG_WRAPPER_GENHD_H
+
+#include <linux/genhd.h>
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <wrapper/kallsyms.h>
+
+static inline
+char *wrapper_disk_name(struct gendisk *hd, int partno, char *buf)
+{
+ char *(*disk_name_sym)(struct gendisk *hd, int partno, char *buf);
+
+ disk_name_sym = (void *) kallsyms_lookup_funcptr("disk_name");
+ if (disk_name_sym) {
+ return disk_name_sym(hd, partno, buf);
+ } else {
+ printk_once(KERN_WARNING "LTTng: disk_name symbol lookup failed.\n");
+ return NULL;
+ }
+}
+
+#else
+
+static inline
+char *wrapper_disk_name(struct gendisk *hd, int partno, char *buf)
+{
+ return disk_name(hd, partno, buf);
+}
+
+#endif
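+
+/*
+ * Illustrative usage sketch: disk_name() formats the name of a disk or
+ * partition into a caller-provided buffer; BDEVNAME_SIZE (from linux/fs.h)
+ * is assumed to be large enough here.
+ *
+ *	char name[BDEVNAME_SIZE];
+ *
+ *	if (wrapper_disk_name(hd, partno, name))
+ *		printk(KERN_DEBUG "LTTng: device %s\n", name);
+ */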
+
+#ifdef CONFIG_KALLSYMS_ALL
+
+static inline
+struct class *wrapper_get_block_class(void)
+{
+ struct class *ptr_block_class;
+
+ ptr_block_class = (struct class *) kallsyms_lookup_dataptr("block_class");
+ if (!ptr_block_class) {
+ printk_once(KERN_WARNING "LTTng: block_class symbol lookup failed.\n");
+ return NULL;
+ }
+ return ptr_block_class;
+}
+
+static inline
+struct device_type *wrapper_get_disk_type(void)
+{
+ struct device_type *ptr_disk_type;
+
+ ptr_disk_type = (struct device_type *) kallsyms_lookup_dataptr("disk_type");
+ if (!ptr_disk_type) {
+ printk_once(KERN_WARNING "LTTng: disk_type symbol lookup failed.\n");
+ return NULL;
+ }
+ return ptr_disk_type;
+}
+
+#else
+
+static inline
+struct class *wrapper_get_block_class(void)
+{
+ /*
+ * Symbol block_class is not exported.
+ * TODO: return &block_class;
+ */
+ /* Feature currently unavailable without KALLSYMS_ALL */
+ return NULL;
+}
+
+static inline
+struct device_type *wrapper_get_disk_type(void)
+{
+ /*
+ * Symbol disk_type is not exported.
+ * TODO: return &disk_type;
+ */
+ /* Feature currently unavailable without KALLSYMS_ALL */
+ return NULL;
+}
+
+#endif
+
+#endif /* _LTTNG_WRAPPER_GENHD_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/inline_memcpy.h
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
+#define inline_memcpy memcpy
+#endif
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/irq.h
+ *
+ * wrapper around linux/irq.h.
+ *
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_IRQ_H
+#define _LTTNG_WRAPPER_IRQ_H
+
+#include <linux/version.h>
+
+/*
+ * Starting from the 3.12 Linux kernel, all architectures use the
+ * generic hard irqs system. More details can be seen at commit
+ * 0244ad004a54e39308d495fee0a2e637f8b5c317 in the Linux kernel GIT.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0) \
+ || defined(CONFIG_GENERIC_HARDIRQS))
+# define CONFIG_LTTNG_HAS_LIST_IRQ
+#endif
+
+#endif /* _LTTNG_WRAPPER_IRQ_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/irqdesc.h
+ *
+ * wrapper around irq_to_desc. Using KALLSYMS to get its address when
+ * available, else we need to have a kernel that exports this function to GPL
+ * modules.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_IRQDESC_H
+#define _LTTNG_WRAPPER_IRQDESC_H
+
+#include <linux/interrupt.h>
+#include <linux/irqnr.h>
+
+struct irq_desc *wrapper_irq_to_desc(unsigned int irq);
+
+#endif /* _LTTNG_WRAPPER_IRQDESC_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/irqflags.h
+ *
+ * wrapper around IRQ flags.
+ *
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_IRQFLAGS_H
+#define _LTTNG_WRAPPER_IRQFLAGS_H
+
+#include <linux/version.h>
+#include <linux/irqflags.h>
+
+#ifdef CONFIG_X86
+
+static inline
+int lttng_regs_irqs_disabled(struct pt_regs *regs)
+{
+ unsigned long flags = regs->flags;
+
+ return raw_irqs_disabled_flags(flags);
+}
+
+#else
+/*
+ * lttng_regs_irqs_disabled() returns -1 when the irqoff state is unknown.
+ * TODO: implement lttng_regs_irqs_disabled() for each architecture to
+ * provide the interruptible context for kprobes and kretprobes.
+ */
+
+static inline
+int lttng_regs_irqs_disabled(struct pt_regs *regs)
+{
+ return -1;
+}
+#endif
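+
+/*
+ * Illustrative usage sketch (record_irq_state() is a hypothetical
+ * helper): the return value is a tri-state, so handle the "unknown"
+ * case explicitly.
+ *
+ *	int irqs_off = lttng_regs_irqs_disabled(regs);
+ *
+ *	if (irqs_off >= 0)
+ *		record_irq_state(irqs_off);
+ *
+ * A negative return means the irqoff state is unknown on this
+ * architecture.
+ */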
+
+#endif /* _LTTNG_WRAPPER_IRQFLAGS_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/kallsyms.h
+ *
+ * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
+ * arches where the address of the start of the function body is different
+ * from the pointer which can be used to call the function, e.g. ARM THUMB2.
+ *
+ * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_KALLSYMS_H
+#define _LTTNG_WRAPPER_KALLSYMS_H
+
+#include <linux/kallsyms.h>
+#include <linux/version.h>
+
+/*
+ * PowerPC ABIv1 needs KALLSYMS_ALL to get the function descriptor,
+ * which is needed to perform the function call.
+ */
+#if defined(CONFIG_PPC64) && (!defined(_CALL_ELF) || _CALL_ELF < 2)
+# ifndef CONFIG_KALLSYMS_ALL
+# error "LTTng-modules requires CONFIG_KALLSYMS_ALL on PowerPC ABIv1"
+# endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+unsigned long wrapper_kallsyms_lookup_name(const char *name);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+static inline
+unsigned long wrapper_kallsyms_lookup_name(const char *name)
+{
+ return kallsyms_lookup_name(name);
+}
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+static inline
+unsigned long kallsyms_lookup_funcptr(const char *name)
+{
+ unsigned long addr;
+
+ addr = wrapper_kallsyms_lookup_name(name);
+#ifdef CONFIG_ARM
+#ifdef CONFIG_THUMB2_KERNEL
+ if (addr)
+ addr |= 1; /* set bit 0 in address for thumb mode */
+#endif
+#endif
+ return addr;
+}
+
+static inline
+unsigned long kallsyms_lookup_dataptr(const char *name)
+{
+ return wrapper_kallsyms_lookup_name(name);
+}
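+
+/*
+ * Illustrative usage sketch ("vmalloc_sync_all" is just an example
+ * symbol): cast the returned address to the proper function pointer type
+ * before calling it; the THUMB2 bit 0 fixup stays transparent to callers.
+ *
+ *	void (*sync_sym)(void);
+ *
+ *	sync_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
+ *	if (sync_sym)
+ *		sync_sym();
+ */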
+
+#endif /* _LTTNG_WRAPPER_KALLSYMS_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * wrapper/kref.h
+ *
+ * wrapper around linux/kref.h.
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This wrapper code is derived from Linux 3.19.2 include/linux/list.h
+ * and include/linux/rculist.h, hence the GPLv2 license applied to this
+ * file.
+ */
+
+#ifndef _LTTNG_WRAPPER_KREF_H
+#define _LTTNG_WRAPPER_KREF_H
+
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+
+/*
+ * lttng_kref_get: get reference count, checking for overflow.
+ *
+ * Return 1 if reference is taken, 0 otherwise (overflow).
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
+static inline int lttng_kref_get(struct kref *kref)
+{
+ kref_get(kref);
+ return 1;
+}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
+static inline int lttng_kref_get(struct kref *kref)
+{
+ return atomic_add_unless(&kref->refcount, 1, INT_MAX);
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
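+
+/*
+ * Illustrative usage sketch (the "obj" object and its "ref" member are
+ * hypothetical): a failed lttng_kref_get() typically fails the lookup
+ * that tried to take the reference.
+ *
+ *	if (!lttng_kref_get(&obj->ref))
+ *		return NULL;
+ */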
+
+#endif /* _LTTNG_WRAPPER_KREF_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * wrapper/list.h
+ *
+ * wrapper around linux/list.h.
+ *
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This wrapper code is derived from Linux 3.19.2 include/linux/list.h
+ * and include/linux/rculist.h, hence the GPLv2 license applied to this
+ * file.
+ */
+
+#ifndef _LTTNG_WRAPPER_LIST_H
+#define _LTTNG_WRAPPER_LIST_H
+
+#include <linux/list.h>
+#include <linux/rculist.h>
+
+/*
+ * return the first or the next element in an RCU protected hlist
+ */
+#define lttng_hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
+#define lttng_hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
+#define lttng_hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
+
+#define lttng_hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * lttng_hlist_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define lttng_hlist_for_each_entry(pos, head, member) \
+ for (pos = lttng_hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = lttng_hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * lttng_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define lttng_hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = lttng_hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = lttng_hlist_entry_safe(n, typeof(*pos), member))
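+
+/*
+ * Illustrative usage sketch (struct my_entry, its "node" member and the
+ * "head" list are hypothetical): the _safe variant allows removing
+ * entries while iterating.
+ *
+ *	struct my_entry *e;
+ *	struct hlist_node *tmp;
+ *
+ *	lttng_hlist_for_each_entry_safe(e, tmp, head, node) {
+ *		hlist_del(&e->node);
+ *		kfree(e);
+ *	}
+ */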
+
+#endif /* _LTTNG_WRAPPER_LIST_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/mm.h
+ *
+ * Copyright (C) 2018 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_MM_H
+#define _LTTNG_WRAPPER_MM_H
+
+#include <linux/mm.h>
+#include <linux/oom.h>
+
+#include <lttng/kernel-version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) \
+ || LTTNG_UBUNTU_KERNEL_RANGE(4,4,25,44, 4,5,0,0))
+
+/*
+ * Returns true if the current estimate of the number of pages available is
+ * larger than the number of pages passed as a parameter.
+ */
+static inline
+bool wrapper_check_enough_free_pages(unsigned long num_pages)
+{
+ return num_pages < si_mem_available();
+}
+
+#else
+
+static inline
+bool wrapper_check_enough_free_pages(unsigned long num_pages)
+{
+ /*
+	 * The si_mem_available() function is not available on this kernel.
+	 * Since we can't reliably know whether enough memory is available,
+	 * return true.
+ */
+ return true;
+}
+#endif
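+
+/*
+ * Illustrative usage sketch: check the estimate before a large buffer
+ * allocation, keeping in mind that kernels without si_mem_available()
+ * always report "enough".
+ *
+ *	if (!wrapper_check_enough_free_pages(nr_pages))
+ *		return -ENOMEM;
+ */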
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+static inline
+void wrapper_set_current_oom_origin(void)
+{
+ return set_current_oom_origin();
+}
+
+static inline
+void wrapper_clear_current_oom_origin(void)
+{
+ return clear_current_oom_origin();
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
+
+static inline
+void wrapper_set_current_oom_origin(void)
+{
+ return;
+}
+
+static inline
+void wrapper_clear_current_oom_origin(void)
+{
+ return;
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
+#endif /* _LTTNG_WRAPPER_MM_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/namespace.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#ifndef _LTTNG_WRAPPER_NAMESPACE_H
+#define _LTTNG_WRAPPER_NAMESPACE_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
+#define lttng_ns_inum ns.inum
+#else
+#define lttng_ns_inum proc_inum
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
+#define lttng_user_ns_parent parent
+#else
+#define lttng_user_ns_parent creator->user_ns
+#endif
+
+#endif /* _LTTNG_WRAPPER_NAMESPACE_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/page_alloc.h
+ *
+ * wrapper around get_pfnblock_flags_mask. Using KALLSYMS to get its address
+ * when available, else we need to have a kernel that exports this function to
+ * GPL modules.
+ *
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_PAGE_ALLOC_H
+#define _LTTNG_WRAPPER_PAGE_ALLOC_H
+
+#include <lttng/kernel-version.h>
+
+/*
+ * We need to redefine get_pfnblock_flags_mask to our wrapper, because
+ * the get_pageblock_migratetype() macro uses it.
+ */
+#if (defined(CONFIG_KALLSYMS) \
+ && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,2) \
+ || LTTNG_KERNEL_RANGE(3,14,36, 3,15,0) \
+ || LTTNG_KERNEL_RANGE(3,18,10, 3,19,0) \
+ || LTTNG_DEBIAN_KERNEL_RANGE(3,16,7,9,0,0, 3,17,0,0,0,0) \
+ || LTTNG_UBUNTU_KERNEL_RANGE(3,16,7,34, 3,17,0,0)))
+
+#define get_pfnblock_flags_mask wrapper_get_pfnblock_flags_mask
+
+#include <linux/mm_types.h>
+
+int wrapper_get_pfnblock_flags_mask_init(void);
+
+#else
+
+#include <linux/mm_types.h>
+
+static inline
+int wrapper_get_pfnblock_flags_mask_init(void)
+{
+ return 0;
+}
+
+#endif
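+
+/*
+ * Illustrative usage sketch (assumes the init function reports failure
+ * with a non-zero return): run it once at module load time, before the
+ * wrapped get_pfnblock_flags_mask() can be called.
+ *
+ *	ret = wrapper_get_pfnblock_flags_mask_init();
+ *	if (ret)
+ *		return ret;
+ */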
+
+/*
+ * For a specific range of Ubuntu 3.13 kernels, we need to redefine
+ * get_pageblock_flags_mask to our wrapper, because the
+ * get_pageblock_migratetype() macro uses it. This function has been
+ * introduced into mainline within commit
+ * e58469bafd0524e848c3733bc3918d854595e20f, but never actually showed
+ * up in a stable kernel version, since it has been changed by commit
+ * dc4b0caff24d9b2918e9f27bc65499ee63187eba. Since Ubuntu chose to only
+ * backport the former commit but not the latter, we need to do a
+ * special case to cover this.
+ */
+#if (defined(CONFIG_KALLSYMS) \
+ && LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,50, 3,14,0,0))
+
+#define get_pageblock_flags_mask wrapper_get_pageblock_flags_mask
+
+#include <linux/mm_types.h>
+
+int wrapper_get_pageblock_flags_mask_init(void);
+
+#else
+
+#include <linux/mm_types.h>
+
+static inline
+int wrapper_get_pageblock_flags_mask_init(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _LTTNG_WRAPPER_PAGE_ALLOC_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/percpu-defs.h
+ *
+ * wrapper around linux/percpu-defs.h.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_PERCPU_DEFS_H
+#define _LTTNG_WRAPPER_PERCPU_DEFS_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
+
+#include <linux/percpu-defs.h>
+
+#define lttng_this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+
+#else /* #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
+
+#include <linux/percpu.h>
+
+#define lttng_this_cpu_ptr(ptr) (&__get_cpu_var(*(ptr)))
+
+#endif /* #else #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
+
+#endif /* _LTTNG_WRAPPER_PERCPU_DEFS_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/perf.h
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_PERF_H
+#define _LTTNG_WRAPPER_PERF_H
+
+#include <linux/version.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_PERF_EVENTS
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+static inline struct perf_event *
+wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback)
+{
+ return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
+}
+#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) */
+static inline struct perf_event *
+wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback)
+{
+ return perf_event_create_kernel_counter(attr, cpu, task, callback);
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) */
+
+#endif /* CONFIG_PERF_EVENTS */
+
+#endif /* _LTTNG_WRAPPER_PERF_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/poll.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_POLL_H
+#define _LTTNG_WRAPPER_POLL_H
+
+#include <linux/poll.h>
+
+/*
+ * Note: poll_wait_set_exclusive() is defined as a no-op. A thundering herd
+ * effect can be noticed with a large number of consumer threads.
+ */
+
+#define poll_wait_set_exclusive(poll_table)
+
+#endif /* _LTTNG_WRAPPER_POLL_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/random.h
+ *
+ * wrapper around bootid read. Reads the boot id through the /proc filesystem.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_RANDOM_H
+#define _LTTNG_WRAPPER_RANDOM_H
+
+#include <lttng/clock.h>
+
+#define BOOT_ID_LEN LTTNG_MODULES_UUID_STR_LEN
+
+int wrapper_get_bootid(char *bootid);
+
+#endif /* _LTTNG_WRAPPER_RANDOM_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/rcu.h
+ *
+ * wrapper around linux/rcupdate.h and linux/rculist.h.
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_RCU_H
+#define _LTTNG_WRAPPER_RCU_H
+
+#include <linux/version.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <wrapper/list.h>
+
+#ifndef rcu_dereference_raw_notrace
+#define rcu_dereference_raw_notrace(p) rcu_dereference_raw(p)
+#endif
+
+#define lttng_rcu_dereference(p) rcu_dereference_raw_notrace(p)
+
+/**
+ * lttng_list_entry_rcu - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by
+ * rcu_read_lock_sched().
+ * Can be used while tracing RCU.
+ */
+#define lttng_list_entry_rcu(ptr, type, member) \
+({ \
+ typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
+ container_of((typeof(ptr))lttng_rcu_dereference(__ptr), type, member); \
+})
+
+/**
+ * lttng_list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock_sched().
+ * Can be used while tracing RCU.
+ */
+#define lttng_list_for_each_entry_rcu(pos, head, member) \
+ for (pos = lttng_list_entry_rcu((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = lttng_list_entry_rcu(pos->member.next, typeof(*pos), member))
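+
+/*
+ * Illustrative usage sketch (struct my_entry, its "list" member, the
+ * "my_list" head and do_something() are all hypothetical):
+ *
+ *	struct my_entry *e;
+ *
+ *	rcu_read_lock_sched();
+ *	lttng_list_for_each_entry_rcu(e, &my_list, list)
+ *		do_something(e);
+ *	rcu_read_unlock_sched();
+ */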
+
+/**
+ * lttng_hlist_for_each_entry_rcu - iterate over rcu list of given type (for tracing)
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define lttng_hlist_for_each_entry_rcu(pos, head, member) \
+ for (pos = lttng_hlist_entry_safe (lttng_rcu_dereference(lttng_hlist_first_rcu(head)), \
+ typeof(*(pos)), member); \
+ pos; \
+ pos = lttng_hlist_entry_safe(lttng_rcu_dereference(lttng_hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
+
+#endif /* _LTTNG_WRAPPER_RCU_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/splice.h
+ *
+ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
+ * available, else we need to have a kernel that exports this function to GPL
+ * modules.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_SPLICE_H
+#define _LTTNG_WRAPPER_SPLICE_H
+
+#include <linux/splice.h>
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+
+#ifndef PIPE_DEF_BUFFERS
+#define PIPE_DEF_BUFFERS 16
+#endif
+
+#endif /* _LTTNG_WRAPPER_SPLICE_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/syscall.h
+ *
+ * wrapper around asm/syscall.h.
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_SYSCALL_H
+#define _LTTNG_WRAPPER_SYSCALL_H
+
+#include <asm/syscall.h>
+#include <lttng/kernel-version.h>
+
+#define LTTNG_SYSCALL_NR_ARGS 6
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+
+#define lttng_syscall_get_arguments(task, regs, args) \
+ syscall_get_arguments(task, regs, args)
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0) */
+
+static inline
+void lttng_syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs, unsigned long *args)
+{
+ syscall_get_arguments(task, regs, 0, LTTNG_SYSCALL_NR_ARGS, args);
+}
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0) */
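+
+/*
+ * Illustrative usage sketch: the destination array must hold
+ * LTTNG_SYSCALL_NR_ARGS entries on every kernel version.
+ *
+ *	unsigned long args[LTTNG_SYSCALL_NR_ARGS];
+ *
+ *	lttng_syscall_get_arguments(current, regs, args);
+ */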
+
+#endif /* _LTTNG_WRAPPER_SYSCALL_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/time.h
+ *
+ * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_TIME_H
+#define _LTTNG_WRAPPER_TIME_H
+
+#include <linux/version.h>
+
+/*
+ * Use the 64-bit timespec on kernels that have it; this makes 32-bit
+ * architectures y2038 compliant.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
+# define LTTNG_KERNEL_HAS_TIMESPEC64
+#endif
+
+#endif /* _LTTNG_WRAPPER_TIME_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/timer.h
+ *
+ * wrapper around linux/timer.h.
+ *
+ * Copyright (C) 2016 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_TIMER_H
+#define _LTTNG_WRAPPER_TIMER_H
+
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <lttng/kernel-version.h>
+
+/*
+ * In the olden days, pinned timers were initialized normally with init_timer()
+ * and then modified with mod_timer_pinned().
+ *
+ * Then came kernel 4.8.0, where they had to be initialized as pinned with
+ * init_timer_pinned() and then modified as regular timers with mod_timer().
+ *
+ * Then came kernel 4.15.0 with a new timer API where init_timer() is no more;
+ * it is replaced by timer_setup(), where pinned is now part of the timer flags.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
+
+#define LTTNG_TIMER_PINNED TIMER_PINNED
+#define LTTNG_TIMER_FUNC_ARG_TYPE struct timer_list *
+
+#define lttng_mod_timer_pinned(timer, expires) \
+ mod_timer(timer, expires)
+
+#define lttng_from_timer(var, callback_timer, timer_fieldname) \
+ from_timer(var, callback_timer, timer_fieldname)
+
+#define lttng_timer_setup(timer, callback, flags, unused) \
+ timer_setup(timer, callback, flags)
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) */
+
+# if (LTTNG_RT_VERSION_CODE >= LTTNG_RT_KERNEL_VERSION(4,6,4,8) \
+ || LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
+
+#define lttng_init_timer_pinned(timer) \
+ init_timer_pinned(timer)
+
+#define lttng_mod_timer_pinned(timer, expires) \
+ mod_timer(timer, expires)
+
+# else /* LTTNG_RT_VERSION_CODE >= LTTNG_RT_KERNEL_VERSION(4,6,4,8) */
+
+#define lttng_init_timer_pinned(timer) \
+ init_timer(timer)
+
+#define lttng_mod_timer_pinned(timer, expires) \
+ mod_timer_pinned(timer, expires)
+
+# endif /* LTTNG_RT_VERSION_CODE >= LTTNG_RT_KERNEL_VERSION(4,6,4,8) */
+
+#define LTTNG_TIMER_PINNED TIMER_PINNED
+#define LTTNG_TIMER_FUNC_ARG_TYPE unsigned long
+
+/* timer_fieldname is unused prior to 4.15. */
+#define lttng_from_timer(var, timer_data, timer_fieldname) \
+ ((typeof(var))timer_data)
+
+static inline void lttng_timer_setup(struct timer_list *timer,
+ void (*function)(LTTNG_TIMER_FUNC_ARG_TYPE),
+ unsigned int flags, void *data)
+{
+ if (flags & LTTNG_TIMER_PINNED)
+ lttng_init_timer_pinned(timer);
+ else
+ init_timer(timer);
+
+ timer->function = function;
+ timer->data = (unsigned long)data;
+}
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) */
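+
+/*
+ * Illustrative usage sketch (struct my_stream and its "timer" member are
+ * hypothetical): the same call sites build against both the pre- and
+ * post-4.15 timer APIs.
+ *
+ *	static void switch_timer_cb(LTTNG_TIMER_FUNC_ARG_TYPE t)
+ *	{
+ *		struct my_stream *s = lttng_from_timer(s, t, timer);
+ *		...
+ *	}
+ *
+ *	lttng_timer_setup(&s->timer, switch_timer_cb, LTTNG_TIMER_PINNED, s);
+ *	lttng_mod_timer_pinned(&s->timer, jiffies + HZ);
+ */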
+
+#endif /* _LTTNG_WRAPPER_TIMER_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/trace-clock.h
+ *
+ * Maps the LTTng trace clock to the in-tree LTTng trace clock when available,
+ * or to the mainline monotonic clock otherwise. This wrapper depends on
+ * CONFIG_HIGH_RES_TIMERS=y.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_TRACE_CLOCK_H
+#define _LTTNG_TRACE_CLOCK_H
+
+#ifdef CONFIG_HAVE_TRACE_CLOCK
+#include <linux/trace-clock.h>
+#else /* CONFIG_HAVE_TRACE_CLOCK */
+
+#include <linux/hardirq.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/percpu.h>
+#include <linux/version.h>
+#include <asm/local.h>
+#include <lttng/kernel-version.h>
+#include <lttng/clock.h>
+#include <wrapper/compiler.h>
+#include <wrapper/percpu-defs.h>
+#include <wrapper/random.h>
+#include <blacklist/timekeeping.h>
+
+extern struct lttng_trace_clock *lttng_trace_clock;
+
+/*
+ * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
+ * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
+ * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
+ */
+#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
+ || LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
+ || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
+ || LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
+#define LTTNG_CLOCK_NMI_SAFE_BROKEN
+#endif
+
+/*
+ * We need clock values to be monotonically increasing per-cpu, which is
+ * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
+ * straightforward to do on architectures with a 64-bit cmpxchg(), but
+ * not so on architectures without 64-bit cmpxchg. For now, only enable
+ * this feature on 64-bit architectures.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
+ && BITS_PER_LONG == 64 \
+ && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
+#define LTTNG_USE_NMI_SAFE_CLOCK
+#endif
+
+#ifdef LTTNG_USE_NMI_SAFE_CLOCK
+
+DECLARE_PER_CPU(u64, lttng_last_tsc);
+
+/*
+ * Sometimes called with preemption enabled. Can be interrupted.
+ */
+static inline u64 trace_clock_monotonic_wrapper(void)
+{
+ u64 now, last, result;
+ u64 *last_tsc_ptr;
+
+ /* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
+ preempt_disable();
+ last_tsc_ptr = lttng_this_cpu_ptr(<tng_last_tsc);
+ last = *last_tsc_ptr;
+ /*
+ * Read "last" before "now". It is not strictly required, but it ensures
+ * that an interrupt coming in won't artificially trigger a case where
+ * "now" < "last". This kind of situation should only happen if the
+ * mono_fast time source goes slightly backwards.
+ */
+ barrier();
+ now = ktime_get_mono_fast_ns();
+ if (U64_MAX / 2 < now - last)
+ now = last;
+ result = cmpxchg64_local(last_tsc_ptr, last, now);
+ preempt_enable();
+ if (result == last) {
+ /* Update done. */
+ return now;
+ } else {
+ /*
+ * Update not done, due to concurrent update. We can use
+ * "result", since it has been sampled concurrently with our
+ * time read, so it should not be far from "now".
+ */
+ return result;
+ }
+}
+
+#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+static inline u64 trace_clock_monotonic_wrapper(void)
+{
+ ktime_t ktime;
+
+ /*
+ * Refuse to trace from NMIs with this wrapper, because an NMI could
+ * nest over the xtime write seqlock and deadlock.
+ */
+ if (in_nmi())
+ return (u64) -EIO;
+
+ ktime = ktime_get();
+ return ktime_to_ns(ktime);
+}
+#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+
+static inline u64 trace_clock_read64_monotonic(void)
+{
+ return (u64) trace_clock_monotonic_wrapper();
+}
+
+static inline u64 trace_clock_freq_monotonic(void)
+{
+ return (u64) NSEC_PER_SEC;
+}
+
+static inline int trace_clock_uuid_monotonic(char *uuid)
+{
+ return wrapper_get_bootid(uuid);
+}
+
+static inline const char *trace_clock_name_monotonic(void)
+{
+ return "monotonic";
+}
+
+static inline const char *trace_clock_description_monotonic(void)
+{
+ return "Monotonic Clock";
+}
+
+#ifdef LTTNG_USE_NMI_SAFE_CLOCK
+static inline int get_trace_clock(void)
+{
+ printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
+ return 0;
+}
+#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+static inline int get_trace_clock(void)
+{
+ printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
+ return 0;
+}
+#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+
+static inline void put_trace_clock(void)
+{
+}
+
+static inline u64 trace_clock_read64(void)
+{
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+ if (likely(!ltc)) {
+ return trace_clock_read64_monotonic();
+ } else {
+ read_barrier_depends(); /* load ltc before content */
+ return ltc->read64();
+ }
+}
+
+static inline u64 trace_clock_freq(void)
+{
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+ if (!ltc) {
+ return trace_clock_freq_monotonic();
+ } else {
+ read_barrier_depends(); /* load ltc before content */
+ return ltc->freq();
+ }
+}
+
+static inline int trace_clock_uuid(char *uuid)
+{
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+ read_barrier_depends(); /* load ltc before content */
+ /* Use default UUID cb when NULL */
+ if (!ltc || !ltc->uuid) {
+ return trace_clock_uuid_monotonic(uuid);
+ } else {
+ return ltc->uuid(uuid);
+ }
+}
+
+static inline const char *trace_clock_name(void)
+{
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+ if (!ltc) {
+ return trace_clock_name_monotonic();
+ } else {
+ read_barrier_depends(); /* load ltc before content */
+ return ltc->name();
+ }
+}
+
+static inline const char *trace_clock_description(void)
+{
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+ if (!ltc) {
+ return trace_clock_description_monotonic();
+ } else {
+ read_barrier_depends(); /* load ltc before content */
+ return ltc->description();
+ }
+}
+
+#endif /* CONFIG_HAVE_TRACE_CLOCK */
+
+#endif /* _LTTNG_TRACE_CLOCK_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/tracepoint.h
+ *
+ * wrapper around DECLARE_EVENT_CLASS.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_TRACEPOINT_H
+#define _LTTNG_WRAPPER_TRACEPOINT_H
+
+#include <linux/version.h>
+#include <linux/tracepoint.h>
+#include <linux/module.h>
+
+#ifndef HAVE_KABI_2635_TRACEPOINT
+
+#define kabi_2635_tracepoint_probe_register tracepoint_probe_register
+#define kabi_2635_tracepoint_probe_unregister tracepoint_probe_unregister
+
+#endif /* HAVE_KABI_2635_TRACEPOINT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0))
+
+#include <lttng/tracepoint.h>
+
+#define lttng_wrapper_tracepoint_probe_register lttng_tracepoint_probe_register
+#define lttng_wrapper_tracepoint_probe_unregister lttng_tracepoint_probe_unregister
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)) */
+
+#define lttng_wrapper_tracepoint_probe_register kabi_2635_tracepoint_probe_register
+#define lttng_wrapper_tracepoint_probe_unregister kabi_2635_tracepoint_probe_unregister
+
+static inline
+int lttng_tracepoint_init(void)
+{
+ return 0;
+}
+
+static inline
+void lttng_tracepoint_exit(void)
+{
+}
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG))
+
+#include <linux/kallsyms.h>
+#include <wrapper/kallsyms.h>
+
+static inline
+int wrapper_tracepoint_module_notify(struct notifier_block *nb,
+ unsigned long val, struct module *mod)
+{
+ int (*tracepoint_module_notify_sym)(struct notifier_block *nb,
+ unsigned long val, struct module *mod);
+
+ tracepoint_module_notify_sym =
+ (void *) kallsyms_lookup_funcptr("tracepoint_module_notify");
+ if (tracepoint_module_notify_sym) {
+ return tracepoint_module_notify_sym(nb, val, mod);
+ } else {
+		printk_once(KERN_WARNING "LTTng: tracepoint_module_notify symbol lookup failed. It probably means your kernel doesn't need this work-around. Please consider upgrading LTTng modules to make this warning go away.\n");
+ return -ENOSYS;
+ }
+}
+
+#endif /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG) && defined(MODULE))
+
+static inline
+int wrapper_lttng_fixup_sig(struct module *mod)
+{
+ int ret = 0;
+
+ /*
+	 * This works around module.c confusing force-loaded modules
+	 * with unsigned modules.
+ */
+ if (!THIS_MODULE->sig_ok &&
+ THIS_MODULE->taints & (1U << TAINT_FORCED_MODULE)) {
+ THIS_MODULE->taints &= ~(1U << TAINT_FORCED_MODULE);
+ ret = wrapper_tracepoint_module_notify(NULL,
+ MODULE_STATE_COMING, mod);
+ THIS_MODULE->taints |= (1U << TAINT_FORCED_MODULE);
+ }
+ return ret;
+}
+
+#else /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG) && defined(MODULE)) */
+
+static inline
+int wrapper_lttng_fixup_sig(struct module *mod)
+{
+ return 0;
+}
+
+#endif /* #else #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG) && defined(MODULE)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+static inline struct tracepoint *lttng_tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return tracepoint_ptr_deref(p);
+}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) */
+static inline struct tracepoint *lttng_tracepoint_ptr_deref(struct tracepoint * const *p)
+{
+ return *p;
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) */
+
+#endif /* _LTTNG_WRAPPER_TRACEPOINT_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/types.h
+ *
+ * Copyright (C) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_TYPES_H
+#define _LTTNG_WRAPPER_TYPES_H
+
+#define LTTNG_SIZE_MAX (~(size_t)0)
+
+#endif /* _LTTNG_WRAPPER_TYPES_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/uaccess.h
+ *
+ * wrapper around linux/uaccess.h.
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_UACCESS_H
+#define _LTTNG_WRAPPER_UACCESS_H
+
+#include <linux/uaccess.h>
+#include <lttng/kernel-version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+#define lttng_access_ok(type, addr, size) access_ok(addr, size)
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) */
+
+#define lttng_access_ok(type, addr, size) access_ok(type, addr, size)
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) */
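+
+/*
+ * Illustrative usage sketch: always pass the access type; it is simply
+ * ignored on kernels where access_ok() no longer takes it.
+ *
+ *	if (!lttng_access_ok(VERIFY_READ, uaddr, len))
+ *		return -EFAULT;
+ */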
+
+#endif /* _LTTNG_WRAPPER_UACCESS_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/uprobes.h
+ *
+ * wrapper around uprobes. Using KALLSYMS to get its address when
+ * available, else we need to have a kernel that exports this function to GPL
+ * modules.
+ *
+ * Copyright (C) 2013 Yannick Brosseau <yannick.brosseau@gmail.com>
+ * Copyright (C) 2017 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ */
+
+#ifndef _LTTNG_WRAPPER_UPROBES_H
+#define _LTTNG_WRAPPER_UPROBES_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
+#include <linux/uprobes.h>
+
+/* Use kallsyms lookup for versions before 3.9. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+
+static inline
+int wrapper_uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ return uprobe_register(inode, offset, uc);
+}
+
+static inline
+void wrapper_uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ uprobe_unregister(inode, offset, uc);
+}
+
+#else /* Version < 3.9, use kallsyms lookup. */
+#include <wrapper/kallsyms.h>
+
+static inline
+int wrapper_uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ int (*uprobe_register_sym)(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+
+ uprobe_register_sym = (void *) kallsyms_lookup_funcptr("uprobe_register");
+
+ if (uprobe_register_sym) {
+ return uprobe_register_sym(inode, offset, uc);
+ } else {
+ printk(KERN_WARNING "LTTng: uprobe_register symbol lookup failed.\n");
+ return -EINVAL;
+ }
+}
+
+static inline
+void wrapper_uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ int (*uprobe_unregister_sym)(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+
+ uprobe_unregister_sym = (void *) kallsyms_lookup_funcptr("uprobe_unregister");
+
+ if (uprobe_unregister_sym) {
+ uprobe_unregister_sym(inode, offset, uc);
+ } else {
+ printk(KERN_WARNING "LTTng: uprobe_unregister symbol lookup failed.\n");
+ WARN_ON(1);
+ }
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) */
+#else
+/* Version < 3.5, before uprobes were added. */
+struct uprobe_consumer {};
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) */
+#endif /* _LTTNG_WRAPPER_UPROBES_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/user_namespace.h
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ */
+
+#ifndef _LTTNG_WRAPPER_USER_NAMESPACE_H
+#define _LTTNG_WRAPPER_USER_NAMESPACE_H
+
+#include <linux/version.h>
+#include <linux/user_namespace.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
+
+#define lttng_current_xxuid(xxx) \
+ (from_kuid_munged(&init_user_ns, current_##xxx()))
+
+#define lttng_current_vxxuid(xxx) \
+ (from_kuid_munged(current_user_ns(), current_##xxx()))
+
+#define lttng_current_xxgid(xxx) \
+ (from_kgid_munged(&init_user_ns, current_##xxx()))
+
+#define lttng_current_vxxgid(xxx) \
+ (from_kgid_munged(current_user_ns(), current_##xxx()))
+
+static inline
+uid_t lttng_task_vuid(struct task_struct *p, struct user_namespace *ns)
+{
+ uid_t uid;
+ kuid_t kuid;
+
+ kuid = task_cred_xxx(p, uid);
+ uid = from_kuid_munged(ns, kuid);
+
+ return uid;
+}
+
+static inline
+gid_t lttng_task_vgid(struct task_struct *p, struct user_namespace *ns)
+{
+ gid_t gid;
+ kgid_t kgid;
+
+ kgid = task_cred_xxx(p, gid);
+ gid = from_kgid_munged(ns, kgid);
+
+ return gid;
+}
+
+#else
+
+#define lttng_current_xxuid(xxx) (current_##xxx())
+
+#define lttng_current_vxxuid(xxx) \
+ (user_ns_map_uid(current_user_ns(), current_cred(), current_##xxx()))
+
+#define lttng_current_xxgid(xxx) (current_##xxx())
+
+#define lttng_current_vxxgid(xxx) \
+ (user_ns_map_gid(current_user_ns(), current_cred(), current_##xxx()))
+
+static inline
+uid_t lttng_task_vuid(struct task_struct *p, struct user_namespace *ns)
+{
+ uid_t uid;
+
+ /*
+	 * __task_cred() requires that the RCU read lock be held.
+ */
+ rcu_read_lock();
+ uid = user_ns_map_uid(ns, __task_cred(p), __task_cred(p)->uid);
+ rcu_read_unlock();
+
+ return uid;
+}
+
+static inline
+gid_t lttng_task_vgid(struct task_struct *p, struct user_namespace *ns)
+{
+ gid_t gid;
+
+ /*
+	 * __task_cred() requires that the RCU read lock be held.
+ */
+ rcu_read_lock();
+ gid = user_ns_map_gid(ns, __task_cred(p), __task_cred(p)->gid);
+ rcu_read_unlock();
+
+ return gid;
+}
+
+#endif
+
+#define lttng_current_uid() (lttng_current_xxuid(uid))
+#define lttng_current_euid() (lttng_current_xxuid(euid))
+#define lttng_current_suid() (lttng_current_xxuid(suid))
+#define lttng_current_fsuid() (lttng_current_xxuid(fsuid))
+#define lttng_current_gid() (lttng_current_xxgid(gid))
+#define lttng_current_egid() (lttng_current_xxgid(egid))
+#define lttng_current_sgid() (lttng_current_xxgid(sgid))
+#define lttng_current_fsgid() (lttng_current_xxgid(fsgid))
+
+#define lttng_current_vuid() (lttng_current_vxxuid(uid))
+#define lttng_current_veuid() (lttng_current_vxxuid(euid))
+#define lttng_current_vsuid() (lttng_current_vxxuid(suid))
+#define lttng_current_vfsuid() (lttng_current_vxxuid(fsuid))
+#define lttng_current_vgid() (lttng_current_vxxgid(gid))
+#define lttng_current_vegid() (lttng_current_vxxgid(egid))
+#define lttng_current_vsgid() (lttng_current_vxxgid(sgid))
+#define lttng_current_vfsgid() (lttng_current_vxxgid(fsgid))
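+
+/*
+ * Illustrative usage sketch: the lttng_current_*() getters return ids
+ * mapped into the initial user namespace, while the lttng_current_v*()
+ * variants return them as seen from the current user namespace.
+ *
+ *	uid_t uid = lttng_current_uid();
+ *	uid_t vuid = lttng_current_vuid();
+ */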
+
+#endif /* _LTTNG_WRAPPER_USER_NAMESPACE_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/uuid.h
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_UUID_H
+#define _LTTNG_WRAPPER_UUID_H
+
+#include <linux/version.h>
+#include <linux/uuid.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+static inline
+void lttng_guid_gen(guid_t *u)
+{
+ return guid_gen(u);
+}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+typedef uuid_le guid_t;
+
+static inline
+void lttng_guid_gen(guid_t *u)
+{
+ return uuid_le_gen(u);
+}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+#endif /* _LTTNG_WRAPPER_UUID_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/vmalloc.h
+ *
+ * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
+ * available, else we need to have a kernel that exports this function to GPL
+ * modules.
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_WRAPPER_VMALLOC_H
+#define _LTTNG_WRAPPER_VMALLOC_H
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <wrapper/kallsyms.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+ void (*vmalloc_sync_mappings_sym)(void);
+
+ vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
+ if (vmalloc_sync_mappings_sym) {
+ vmalloc_sync_mappings_sym();
+ } else {
+#ifdef CONFIG_X86
+ /*
+ * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
+ * trigger recursive page faults.
+ */
+ printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
+ printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
+#endif
+ }
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+/*
+ * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.7.
+ */
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+ void (*vmalloc_sync_all_sym)(void);
+
+ vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
+ if (vmalloc_sync_all_sym) {
+ vmalloc_sync_all_sym();
+ } else {
+#ifdef CONFIG_X86
+ /*
+ * Only x86 needs vmalloc_sync_all to make sure LTTng does not
+ * trigger recursive page faults.
+ */
+ printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
+ printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
+#endif
+ }
+}
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+#else
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+ return vmalloc_sync_mappings();
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+ return vmalloc_sync_all();
+}
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
+static inline
+void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
+{
+ void *ret;
+
+ ret = kvmalloc_node(size, flags, node);
+ if (is_vmalloc_addr(ret)) {
+ /*
+ * Make sure we don't trigger recursive page faults in the
+ * tracing fast path.
+ */
+ wrapper_vmalloc_sync_mappings();
+ }
+ return ret;
+}
+
+static inline
+void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
+{
+ return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+
+static inline
+void *lttng_kvmalloc(unsigned long size, gfp_t flags)
+{
+ return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void *lttng_kvzalloc(unsigned long size, gfp_t flags)
+{
+ return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void lttng_kvfree(const void *addr)
+{
+ kvfree(addr);
+}
+
+#else
+
+#include <linux/slab.h>
+
+static inline
+void print_vmalloc_node_range_warning(void)
+{
+ printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
+ printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
+ printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
+}
+
+/*
+ * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
+ */
+static inline
+void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller)
+{
+#ifdef CONFIG_KALLSYMS
+ /*
+	 * If we have KALLSYMS, get __vmalloc_node_range() which is not exported.
+ */
+ void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller);
+
+ lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
+ if (lttng__vmalloc_node_range)
+ return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+ vm_flags, node, caller);
+#endif
+ if (node != NUMA_NO_NODE)
+ print_vmalloc_node_range_warning();
+ return __vmalloc(size, gfp_mask, prot);
+}
+
+/**
+ * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * failure, fall back to non-contiguous (vmalloc) allocation.
+ * @size: size of the request.
+ * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
+ * @node: NUMA node to allocate from.
+ *
+ * Uses kmalloc to get the memory, but if the allocation fails, falls back
+ * to the vmalloc allocator. Use lttng_kvfree() to free the memory.
+ *
+ * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
+ */
+static inline
+void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
+{
+ void *ret;
+
+ /*
+ * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
+ * so the given set of flags has to be compatible.
+ */
+ WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
+
+ /*
+ * If the allocation fits in a single page, do not fallback.
+ */
+ if (size <= PAGE_SIZE) {
+ return kmalloc_node(size, flags, node);
+ }
+
+ /*
+ * Make sure that larger requests are not too disruptive - no OOM
+ * killer and no allocation failure warnings as we have a fallback
+ */
+ ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
+ if (!ret) {
+ ret = __lttng_vmalloc_node_range(size, 1,
+ VMALLOC_START, VMALLOC_END,
+ flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
+ node, __builtin_return_address(0));
+ /*
+ * Make sure we don't trigger recursive page faults in the
+ * tracing fast path.
+ */
+ wrapper_vmalloc_sync_mappings();
+ }
+ return ret;
+}
+
+static inline
+void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
+{
+ return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+
+static inline
+void *lttng_kvmalloc(unsigned long size, gfp_t flags)
+{
+ return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void *lttng_kvzalloc(unsigned long size, gfp_t flags)
+{
+ return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void lttng_kvfree(const void *addr)
+{
+ if (is_vmalloc_addr(addr)) {
+ vfree(addr);
+ } else {
+ kfree(addr);
+ }
+}
+#endif
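+
+/*
+ * Illustrative usage sketch ("buf" and "nr_entries" are hypothetical):
+ * large allocations that may fall back to vmalloc are synced for the
+ * tracing fast path and must be freed with lttng_kvfree().
+ *
+ *	buf = lttng_kvzalloc(nr_entries * sizeof(*buf), GFP_KERNEL);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	...
+ *	lttng_kvfree(buf);
+ */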
+
+#endif /* _LTTNG_WRAPPER_VMALLOC_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * wrapper/writeback.h
+ *
+ * wrapper around global_dirty_limit read. Using KALLSYMS with KALLSYMS_ALL
+ * to get its address when available, else we need to have a kernel that
+ * exports this variable to GPL modules.
+ *
+ * Copyright (C) 2013 Mentor Graphics Corp.
+ */
+
+#ifndef _LTTNG_WRAPPER_WRITEBACK_H
+#define _LTTNG_WRAPPER_WRITEBACK_H
+
+#include <lttng/kernel-version.h>
+
+#ifdef CONFIG_KALLSYMS_ALL
+#include <linux/kallsyms.h>
+#include <wrapper/kallsyms.h>
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)
+
+static struct wb_domain *global_wb_domain_sym;
+
+static inline
+unsigned long wrapper_global_dirty_limit(void)
+{
+ if (!global_wb_domain_sym)
+ global_wb_domain_sym =
+ (void *) kallsyms_lookup_dataptr("global_wb_domain");
+ if (global_wb_domain_sym) {
+ return global_wb_domain_sym->dirty_limit;
+ } else {
+ printk_once(KERN_WARNING "LTTng: global_wb_domain symbol lookup failed.\n");
+ return 0;
+ }
+}
+#else
+
+static unsigned long *global_dirty_limit_sym;
+
+static inline
+unsigned long wrapper_global_dirty_limit(void)
+{
+ if (!global_dirty_limit_sym)
+ global_dirty_limit_sym =
+ (void *) kallsyms_lookup_dataptr("global_dirty_limit");
+ if (global_dirty_limit_sym) {
+ return *global_dirty_limit_sym;
+ } else {
+ printk_once(KERN_WARNING "LTTng: global_dirty_limit symbol lookup failed.\n");
+ return 0;
+ }
+}
+#endif
+
+#else /* CONFIG_KALLSYMS_ALL */
+
+#include <linux/writeback.h>
+
+static inline
+unsigned long wrapper_global_dirty_limit(void)
+{
+ return global_dirty_limit;
+}
+
+#endif /* CONFIG_KALLSYMS_ALL */
+
+#endif /* _LTTNG_WRAPPER_WRITEBACK_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * wrapper/atomic.h
- *
- * wrapper around linux/atomic.h.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_ATOMIC_H
-#define _LTTNG_WRAPPER_ATOMIC_H
-
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0))
-static inline void lttng_smp_mb__before_atomic(void)
-{
- smp_mb__before_atomic();
-}
-
-static inline void lttng_smp_mb__after_atomic(void)
-{
- smp_mb__after_atomic();
-}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
-static inline void lttng_smp_mb__before_atomic(void)
-{
- smp_mb__before_atomic_inc();
-}
-
-static inline void lttng_smp_mb__after_atomic(void)
-{
- smp_mb__after_atomic_inc();
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) */
-
-#endif /* _LTTNG_WRAPPER_ATOMIC_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/compiler.h
- *
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_COMPILER_H
-#define _LTTNG_WRAPPER_COMPILER_H
-
-#include <linux/compiler.h>
-
-/*
- * Don't allow compiling with buggy compiler.
- */
-
-#ifdef GCC_VERSION
-
-/*
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- */
-# ifdef __ARMEL__
-# if GCC_VERSION >= 40800 && GCC_VERSION <= 40802
-# error Your gcc version produces clobbered frame accesses
-# endif
-# endif
-#endif
-
-/*
- * READ/WRITE_ONCE were introduced in kernel 3.19 and ACCESS_ONCE
- * was removed in 4.15. Prefer READ/WRITE but fallback to ACCESS
- * when they are not available.
- */
-#ifndef READ_ONCE
-# define READ_ONCE(x) ACCESS_ONCE(x)
-#endif
-
-#ifndef WRITE_ONCE
-# define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = val; })
-#endif
-
-#define __LTTNG_COMPOUND_LITERAL(type, ...) (type[]) { __VA_ARGS__ }
-
-#endif /* _LTTNG_WRAPPER_COMPILER_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/fdtable.h
- *
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_FDTABLE_H
-#define _LTTNG_WRAPPER_FDTABLE_H
-
-#include <linux/version.h>
-#include <linux/fdtable.h>
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
-
-int lttng_iterate_fd(struct files_struct *files,
- unsigned int first,
- int (*cb)(const void *, struct file *, unsigned int),
- const void *ctx);
-
-#else
-
-/*
- * iterate_fd() appeared at commit
- * c3c073f808b22dfae15ef8412b6f7b998644139a in the Linux kernel (first
- * released kernel: v3.7).
- */
-#define lttng_iterate_fd iterate_fd
-
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-
-static inline bool lttng_close_on_exec(int fd, const struct fdtable *fdt)
-{
- return close_on_exec(fd, fdt);
-}
-
-#else
-
-static inline bool lttng_close_on_exec(int fd, const struct fdtable *fdt)
-{
- return FD_ISSET(fd, fdt->close_on_exec);
-}
-
-#endif
-
-#endif /* _LTTNG_WRAPPER_FDTABLE_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/file.h
- *
- * wrapper around linux/file.h.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_FILE_H
-#define _LTTNG_WRAPPER_FILE_H
-
-#include <linux/version.h>
-#include <linux/file.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
-
-static
-inline int lttng_get_unused_fd(void)
-{
- return get_unused_fd_flags(0);
-}
-
-#define lttng_f_dentry f_path.dentry
-
-#else /* #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
-
-static
-inline int lttng_get_unused_fd(void)
-{
- return get_unused_fd();
-}
-
-#define lttng_f_dentry f_dentry
-
-#endif /* #else #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
-
-#endif /* _LTTNG_WRAPPER_FILE_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/frame.h
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_FRAME_H
-#define _LTTNG_WRAPPER_FRAME_H
-
-#include <linux/version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
-
-#include <linux/frame.h>
-
-#define LTTNG_STACK_FRAME_NON_STANDARD(func) \
- STACK_FRAME_NON_STANDARD(func)
-
-#else
-
-#define LTTNG_STACK_FRAME_NON_STANDARD(func)
-
-#endif
-
-#endif /* _LTTNG_WRAPPER_FRAME_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/genhd.h
- *
- * wrapper around block layer functions and data structures. Using
- * KALLSYMS to get its address when available, else we need to have a
- * kernel that exports this function to GPL modules.
- *
- * Copyright (C) 2011-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_GENHD_H
-#define _LTTNG_WRAPPER_GENHD_H
-
-#include <linux/genhd.h>
-
-#ifdef CONFIG_KALLSYMS
-
-#include <linux/kallsyms.h>
-#include <wrapper/kallsyms.h>
-
-static inline
-char *wrapper_disk_name(struct gendisk *hd, int partno, char *buf)
-{
- char *(*disk_name_sym)(struct gendisk *hd, int partno, char *buf);
-
- disk_name_sym = (void *) kallsyms_lookup_funcptr("disk_name");
- if (disk_name_sym) {
- return disk_name_sym(hd, partno, buf);
- } else {
- printk_once(KERN_WARNING "LTTng: disk_name symbol lookup failed.\n");
- return NULL;
- }
-}
-
-#else
-
-static inline
-char *wrapper_disk_name(struct gendisk *hd, int partno, char *buf)
-{
- return disk_name(hd, partno, buf);
-}
-
-#endif
-
-#ifdef CONFIG_KALLSYMS_ALL
-
-static inline
-struct class *wrapper_get_block_class(void)
-{
- struct class *ptr_block_class;
-
- ptr_block_class = (struct class *) kallsyms_lookup_dataptr("block_class");
- if (!ptr_block_class) {
- printk_once(KERN_WARNING "LTTng: block_class symbol lookup failed.\n");
- return NULL;
- }
- return ptr_block_class;
-}
-
-static inline
-struct device_type *wrapper_get_disk_type(void)
-{
- struct device_type *ptr_disk_type;
-
- ptr_disk_type = (struct device_type *) kallsyms_lookup_dataptr("disk_type");
- if (!ptr_disk_type) {
- printk_once(KERN_WARNING "LTTng: disk_type symbol lookup failed.\n");
- return NULL;
- }
- return ptr_disk_type;
-}
-
-#else
-
-static inline
-struct class *wrapper_get_block_class(void)
-{
-	/*
-	 * Symbol block_class is not exported; this feature is currently
-	 * unavailable without KALLSYMS_ALL.
-	 * TODO: return &block_class;
-	 */
- return NULL;
-}
-
-static inline
-struct device_type *wrapper_get_disk_type(void)
-{
-	/*
-	 * Symbol disk_type is not exported; this feature is currently
-	 * unavailable without KALLSYMS_ALL.
-	 * TODO: return &disk_type;
-	 */
- return NULL;
-}
-
-#endif
-
-#endif /* _LTTNG_WRAPPER_GENHD_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/inline_memcpy.h
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
-#define inline_memcpy memcpy
-#endif
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/irq.h
- *
- * wrapper around linux/irq.h.
- *
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_IRQ_H
-#define _LTTNG_WRAPPER_IRQ_H
-
-#include <linux/version.h>
-
-/*
- * Starting from the 3.12 Linux kernel, all architectures use the
- * generic hard irqs system. More details can be seen at commit
- * 0244ad004a54e39308d495fee0a2e637f8b5c317 in the Linux kernel GIT.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0) \
- || defined(CONFIG_GENERIC_HARDIRQS))
-# define CONFIG_LTTNG_HAS_LIST_IRQ
-#endif
-
-#endif /* _LTTNG_WRAPPER_IRQ_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/irqdesc.h
- *
- * wrapper around irq_to_desc. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_IRQDESC_H
-#define _LTTNG_WRAPPER_IRQDESC_H
-
-#include <linux/interrupt.h>
-#include <linux/irqnr.h>
-
-struct irq_desc *wrapper_irq_to_desc(unsigned int irq);
-
-#endif /* _LTTNG_WRAPPER_IRQDESC_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/irqflags.h
- *
- * wrapper around IRQ flags.
- *
- * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_IRQFLAGS_H
-#define _LTTNG_WRAPPER_IRQFLAGS_H
-
-#include <linux/version.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_X86
-
-static inline
-int lttng_regs_irqs_disabled(struct pt_regs *regs)
-{
- unsigned long flags = regs->flags;
-
- return raw_irqs_disabled_flags(flags);
-}
-
-#else
-/*
- * lttng_regs_irqs_disabled() returns -1 if irqoff state is unknown.
- * TODO: should implement lttng_regs_irqs_disabled for each architecture
- * to add interruptible context for kprobes and kretprobes.
- */
-
-static inline
-int lttng_regs_irqs_disabled(struct pt_regs *regs)
-{
- return -1;
-}
-#endif
-
-#endif /* _LTTNG_WRAPPER_IRQFLAGS_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/kallsyms.h
- *
- * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
- * arches where the address of the start of the function body is different
- * from the pointer which can be used to call the function, e.g. ARM THUMB2.
- *
- * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_KALLSYMS_H
-#define _LTTNG_WRAPPER_KALLSYMS_H
-
-#include <linux/kallsyms.h>
-#include <linux/version.h>
-
-/*
- * PowerPC ABIv1 needs KALLSYMS_ALL to get the function descriptor,
- * which is needed to perform the function call.
- */
-#if defined(CONFIG_PPC64) && (!defined(_CALL_ELF) || _CALL_ELF < 2)
-# ifndef CONFIG_KALLSYMS_ALL
-# error "LTTng-modules requires CONFIG_KALLSYMS_ALL on PowerPC ABIv1"
-# endif
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
-
-unsigned long wrapper_kallsyms_lookup_name(const char *name);
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-static inline
-unsigned long wrapper_kallsyms_lookup_name(const char *name)
-{
- return kallsyms_lookup_name(name);
-}
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-static inline
-unsigned long kallsyms_lookup_funcptr(const char *name)
-{
- unsigned long addr;
-
- addr = wrapper_kallsyms_lookup_name(name);
-#ifdef CONFIG_ARM
-#ifdef CONFIG_THUMB2_KERNEL
- if (addr)
- addr |= 1; /* set bit 0 in address for thumb mode */
-#endif
-#endif
- return addr;
-}
-
-static inline
-unsigned long kallsyms_lookup_dataptr(const char *name)
-{
- return wrapper_kallsyms_lookup_name(name);
-}
-
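-/*
- * Hedged sketch (not part of the original header): looking up a
- * non-exported function and calling it through a typed pointer, as the
- * other wrappers in this tree do. The symbol name is a placeholder.
- */
-static inline void lttng_example_call_hidden(void)
-{
-	void (*fn)(void);
-
-	fn = (void *) kallsyms_lookup_funcptr("some_non_exported_function");
-	if (fn)
-		fn();
-	else
-		printk_once(KERN_WARNING "LTTng: example symbol lookup failed.\n");
-}
-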
-#endif /* _LTTNG_WRAPPER_KALLSYMS_H */
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-only
- *
- * wrapper/kref.h
- *
- * wrapper around linux/kref.h.
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This wrapper code is derived from Linux 3.19.2 include/linux/list.h
- * and include/linux/rculist.h, hence the GPLv2 license applied to this
- * file.
- */
-
-#ifndef _LTTNG_WRAPPER_KREF_H
-#define _LTTNG_WRAPPER_KREF_H
-
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/version.h>
-
-/*
- * lttng_kref_get: take a reference, checking for refcount overflow.
- *
- * Return 1 if reference is taken, 0 otherwise (overflow).
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
-static inline int lttng_kref_get(struct kref *kref)
-{
- kref_get(kref);
- return 1;
-}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
-static inline int lttng_kref_get(struct kref *kref)
-{
- return atomic_add_unless(&kref->refcount, 1, INT_MAX);
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
-
-#endif /* _LTTNG_WRAPPER_KREF_H */
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-only
- *
- * wrapper/list.h
- *
- * wrapper around linux/list.h.
- *
- * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This wrapper code is derived from Linux 3.19.2 include/linux/list.h
- * and include/linux/rculist.h, hence the GPLv2 license applied to this
- * file.
- */
-
-#ifndef _LTTNG_WRAPPER_LIST_H
-#define _LTTNG_WRAPPER_LIST_H
-
-#include <linux/list.h>
-#include <linux/rculist.h>
-
-/*
- * return the first or the next element in an RCU protected hlist
- */
-#define lttng_hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
-#define lttng_hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
-#define lttng_hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
-
-#define lttng_hlist_entry_safe(ptr, type, member) \
- ({ typeof(ptr) ____ptr = (ptr); \
- ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
- })
-
-/**
- * lttng_hlist_for_each_entry - iterate over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- */
-#define lttng_hlist_for_each_entry(pos, head, member) \
- for (pos = lttng_hlist_entry_safe((head)->first, typeof(*(pos)), member);\
- pos; \
- pos = lttng_hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
-
-/**
- * lttng_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another &struct hlist_node to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- */
-#define lttng_hlist_for_each_entry_safe(pos, n, head, member) \
- for (pos = lttng_hlist_entry_safe((head)->first, typeof(*pos), member);\
- pos && ({ n = pos->member.next; 1; }); \
- pos = lttng_hlist_entry_safe(n, typeof(*pos), member))
-
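-/*
- * Traversal sketch (hypothetical element type): the wrappers above are
- * used exactly like upstream hlist_for_each_entry().
- */
-struct lttng_example_node {
-	int value;
-	struct hlist_node node;
-};
-
-static inline int lttng_example_sum(struct hlist_head *head)
-{
-	struct lttng_example_node *pos;
-	int sum = 0;
-
-	lttng_hlist_for_each_entry(pos, head, node)
-		sum += pos->value;
-	return sum;
-}
-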
-#endif /* _LTTNG_WRAPPER_LIST_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/mm.h
- *
- * Copyright (C) 2018 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_MM_H
-#define _LTTNG_WRAPPER_MM_H
-
-#include <linux/mm.h>
-#include <linux/oom.h>
-
-#include <lttng/kernel-version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) \
- || LTTNG_UBUNTU_KERNEL_RANGE(4,4,25,44, 4,5,0,0))
-
-/*
- * Returns true if the current estimate of the number of available pages
- * is larger than the number of pages passed as parameter.
- */
-static inline
-bool wrapper_check_enough_free_pages(unsigned long num_pages)
-{
- return num_pages < si_mem_available();
-}
-
-#else
-
-static inline
-bool wrapper_check_enough_free_pages(unsigned long num_pages)
-{
- /*
- * The si_mem_available function is not available on this kernel.
- * Since we can't reliably know whether enough memory is available,
- * return true.
- */
- return true;
-}
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
-static inline
-void wrapper_set_current_oom_origin(void)
-{
- return set_current_oom_origin();
-}
-
-static inline
-void wrapper_clear_current_oom_origin(void)
-{
- return clear_current_oom_origin();
-}
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
-
-static inline
-void wrapper_set_current_oom_origin(void)
-{
- return;
-}
-
-static inline
-void wrapper_clear_current_oom_origin(void)
-{
- return;
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
-#endif /* _LTTNG_WRAPPER_MM_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/namespace.h
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#ifndef _LTTNG_WRAPPER_NAMESPACE_H
-#define _LTTNG_WRAPPER_NAMESPACE_H
-
-#include <linux/version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
-#define lttng_ns_inum ns.inum
-#else
-#define lttng_ns_inum proc_inum
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
-#define lttng_user_ns_parent parent
-#else
-#define lttng_user_ns_parent creator->user_ns
-#endif
-
-#endif /* _LTTNG_WRAPPER_NAMESPACE_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/page_alloc.h
- *
- * wrapper around get_pfnblock_flags_mask. Using KALLSYMS to get its address
- * when available, else we need to have a kernel that exports this function to
- * GPL modules.
- *
- * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_PAGE_ALLOC_H
-#define _LTTNG_WRAPPER_PAGE_ALLOC_H
-
-#include <lttng/kernel-version.h>
-
-/*
- * We need to redefine get_pfnblock_flags_mask to our wrapper, because
- * the get_pageblock_migratetype() macro uses it.
- */
-#if (defined(CONFIG_KALLSYMS) \
- && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,2) \
- || LTTNG_KERNEL_RANGE(3,14,36, 3,15,0) \
- || LTTNG_KERNEL_RANGE(3,18,10, 3,19,0) \
- || LTTNG_DEBIAN_KERNEL_RANGE(3,16,7,9,0,0, 3,17,0,0,0,0) \
- || LTTNG_UBUNTU_KERNEL_RANGE(3,16,7,34, 3,17,0,0)))
-
-#define get_pfnblock_flags_mask wrapper_get_pfnblock_flags_mask
-
-#include <linux/mm_types.h>
-
-int wrapper_get_pfnblock_flags_mask_init(void);
-
-#else
-
-#include <linux/mm_types.h>
-
-static inline
-int wrapper_get_pfnblock_flags_mask_init(void)
-{
- return 0;
-}
-
-#endif
-
-/*
- * For a specific range of Ubuntu 3.13 kernels, we need to redefine
- * get_pageblock_flags_mask to our wrapper, because the
- * get_pageblock_migratetype() macro uses it. This function has been
- * introduced into mainline within commit
- * e58469bafd0524e848c3733bc3918d854595e20f, but never actually showed
- * up in a stable kernel version, since it has been changed by commit
- * dc4b0caff24d9b2918e9f27bc65499ee63187eba. Since Ubuntu chose to only
- * backport the former commit but not the latter, we need to handle
- * this as a special case.
- */
-#if (defined(CONFIG_KALLSYMS) \
- && LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,50, 3,14,0,0))
-
-#define get_pageblock_flags_mask wrapper_get_pageblock_flags_mask
-
-#include <linux/mm_types.h>
-
-int wrapper_get_pageblock_flags_mask_init(void);
-
-#else
-
-#include <linux/mm_types.h>
-
-static inline
-int wrapper_get_pageblock_flags_mask_init(void)
-{
- return 0;
-}
-
-#endif
-
-#endif /* _LTTNG_WRAPPER_PAGE_ALLOC_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/percpu-defs.h
- *
- * wrapper around linux/percpu-defs.h.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_PERCPU_DEFS_H
-#define _LTTNG_WRAPPER_PERCPU_DEFS_H
-
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
-
-#include <linux/percpu-defs.h>
-
-#define lttng_this_cpu_ptr(ptr) this_cpu_ptr(ptr)
-
-#else /* #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
-
-#include <linux/percpu.h>
-
-#define lttng_this_cpu_ptr(ptr) (&__get_cpu_var(*(ptr)))
-
-#endif /* #else #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
-
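-/*
- * Minimal sketch (hypothetical per-cpu counter): like this_cpu_ptr(),
- * lttng_this_cpu_ptr() must be called with preemption disabled.
- */
-DECLARE_PER_CPU(unsigned long, lttng_example_counter);
-
-static inline void lttng_example_count_event(void)
-{
-	preempt_disable();
-	(*lttng_this_cpu_ptr(&lttng_example_counter))++;
-	preempt_enable();
-}
-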
-#endif /* _LTTNG_WRAPPER_PERCPU_DEFS_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/perf.h
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_PERF_H
-#define _LTTNG_WRAPPER_PERF_H
-
-#include <linux/perf_event.h>
-
-#ifdef CONFIG_PERF_EVENTS
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
-static inline struct perf_event *
-wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- struct task_struct *task,
- perf_overflow_handler_t callback)
-{
- return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
-}
-#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) */
-static inline struct perf_event *
-wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- struct task_struct *task,
- perf_overflow_handler_t callback)
-{
- return perf_event_create_kernel_counter(attr, cpu, task, callback);
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) */
-
-#endif /* CONFIG_PERF_EVENTS */
-
-#endif /* _LTTNG_WRAPPER_PERF_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/poll.h
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_POLL_H
-#define _LTTNG_WRAPPER_POLL_H
-
-#include <linux/poll.h>
-
-/*
- * Note: poll_wait_set_exclusive() is defined as a no-op. A thundering
- * herd effect can be noticed with a large number of consumer threads.
- */
-
-#define poll_wait_set_exclusive(poll_table)
-
-#endif /* _LTTNG_WRAPPER_POLL_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * wrapper/random.h
- *
- * wrapper around bootid read. Read the boot id through the /proc filesystem.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_RANDOM_H
-#define _LTTNG_WRAPPER_RANDOM_H
-
-#include <lttng/clock.h>
-
-#define BOOT_ID_LEN LTTNG_MODULES_UUID_STR_LEN
-
-int wrapper_get_bootid(char *bootid);
-
-#endif /* _LTTNG_WRAPPER_RANDOM_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/rcu.h
- *
- * wrapper around linux/rcupdate.h and linux/rculist.h.
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_RCU_H
-#define _LTTNG_WRAPPER_RCU_H
-
-#include <linux/version.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <wrapper/list.h>
-
-#ifndef rcu_dereference_raw_notrace
-#define rcu_dereference_raw_notrace(p) rcu_dereference_raw(p)
-#endif
-
-#define lttng_rcu_dereference(p) rcu_dereference_raw_notrace(p)
-
-/**
- * lttng_list_entry_rcu - get the struct for this entry
- * @ptr: the &struct list_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_head within the struct.
- *
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu() as long as it's guarded by
- * rcu_read_lock_sched().
- * Can be used while tracing RCU.
- */
-#define lttng_list_entry_rcu(ptr, type, member) \
-({ \
- typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
- container_of((typeof(ptr))lttng_rcu_dereference(__ptr), type, member); \
-})
-
-/**
- * lttng_list_for_each_entry_rcu - iterate over rcu list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock_sched().
- * Can be used while tracing RCU.
- */
-#define lttng_list_for_each_entry_rcu(pos, head, member) \
- for (pos = lttng_list_entry_rcu((head)->next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = lttng_list_entry_rcu(pos->member.next, typeof(*pos), member))
-
-/**
- * lttng_hlist_for_each_entry_rcu - iterate over rcu list of given type (for tracing)
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as hlist_add_head_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- *
- * This is the same as hlist_for_each_entry_rcu() except that it does
- * not do any RCU debugging or tracing.
- */
-#define lttng_hlist_for_each_entry_rcu(pos, head, member) \
- for (pos = lttng_hlist_entry_safe (lttng_rcu_dereference(lttng_hlist_first_rcu(head)), \
- typeof(*(pos)), member); \
- pos; \
- pos = lttng_hlist_entry_safe(lttng_rcu_dereference(lttng_hlist_next_rcu( \
- &(pos)->member)), typeof(*(pos)), member))
-
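-/*
- * Traversal sketch (hypothetical element type): iterating an RCU list
- * from tracing code with the notrace-safe accessors above, under
- * rcu_read_lock_sched_notrace().
- */
-struct lttng_example_probe {
-	int id;
-	struct list_head head;
-};
-
-static inline int lttng_example_probe_registered(struct list_head *list, int id)
-{
-	struct lttng_example_probe *probe;
-	int found = 0;
-
-	rcu_read_lock_sched_notrace();
-	lttng_list_for_each_entry_rcu(probe, list, head) {
-		if (probe->id == id) {
-			found = 1;
-			break;
-		}
-	}
-	rcu_read_unlock_sched_notrace();
-	return found;
-}
-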
-#endif /* _LTTNG_WRAPPER_RCU_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/splice.h
- *
- * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_SPLICE_H
-#define _LTTNG_WRAPPER_SPLICE_H
-
-#include <linux/splice.h>
-
-ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd);
-
-#ifndef PIPE_DEF_BUFFERS
-#define PIPE_DEF_BUFFERS 16
-#endif
-
-#endif /* _LTTNG_WRAPPER_SPLICE_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/syscall.h
- *
- * wrapper around asm/syscall.h.
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_SYSCALL_H
-#define _LTTNG_WRAPPER_SYSCALL_H
-
-#include <asm/syscall.h>
-#include <lttng/kernel-version.h>
-
-#define LTTNG_SYSCALL_NR_ARGS 6
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
-
-#define lttng_syscall_get_arguments(task, regs, args) \
- syscall_get_arguments(task, regs, args)
-
-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0) */
-
-static inline
-void lttng_syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs, unsigned long *args)
-{
- syscall_get_arguments(task, regs, 0, LTTNG_SYSCALL_NR_ARGS, args);
-}
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0) */
-
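-/*
- * Hedged sketch: fetching all six syscall arguments from a tracing
- * callback with the version-independent wrapper above. Assumes the
- * caller runs in the context of current entering a syscall (and that
- * <linux/sched.h> is reachable for current).
- */
-static inline unsigned long lttng_example_first_arg(struct pt_regs *regs)
-{
-	unsigned long args[LTTNG_SYSCALL_NR_ARGS];
-
-	lttng_syscall_get_arguments(current, regs, args);
-	return args[0];
-}
-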
-#endif /* _LTTNG_WRAPPER_SYSCALL_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/time.h
- *
- * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_TIME_H
-#define _LTTNG_WRAPPER_TIME_H
-
-#include <linux/version.h>
-
-/*
- * Use the 64-bit timespec on kernels that have it; this makes 32-bit
- * architectures y2038 compliant.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
-# define LTTNG_KERNEL_HAS_TIMESPEC64
-#endif
-
-#endif /* _LTTNG_WRAPPER_TIME_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/timer.h
- *
- * wrapper around linux/timer.h.
- *
- * Copyright (C) 2016 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_TIMER_H
-#define _LTTNG_WRAPPER_TIMER_H
-
-#include <linux/version.h>
-#include <linux/timer.h>
-#include <lttng/kernel-version.h>
-
-/*
- * In the olden days, pinned timers were initialized normally with init_timer()
- * and then modified with mod_timer_pinned().
- *
- * Then came kernel 4.8.0, where they had to be initialized as pinned with
- * init_timer_pinned() and then modified as regular timers with mod_timer().
- *
- * Then came kernel 4.15.0 with a new timer API where init_timer() is gone,
- * replaced by timer_setup(), with pinning now part of the timer flags.
- */
-
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
-
-#define LTTNG_TIMER_PINNED TIMER_PINNED
-#define LTTNG_TIMER_FUNC_ARG_TYPE struct timer_list *
-
-#define lttng_mod_timer_pinned(timer, expires) \
- mod_timer(timer, expires)
-
-#define lttng_from_timer(var, callback_timer, timer_fieldname) \
- from_timer(var, callback_timer, timer_fieldname)
-
-#define lttng_timer_setup(timer, callback, flags, unused) \
- timer_setup(timer, callback, flags)
-
-
-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) */
-
-
-# if (LTTNG_RT_VERSION_CODE >= LTTNG_RT_KERNEL_VERSION(4,6,4,8) \
- || LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
-
-#define lttng_init_timer_pinned(timer) \
- init_timer_pinned(timer)
-
-#define lttng_mod_timer_pinned(timer, expires) \
- mod_timer(timer, expires)
-
-# else /* LTTNG_RT_VERSION_CODE >= LTTNG_RT_KERNEL_VERSION(4,6,4,8) */
-
-#define lttng_init_timer_pinned(timer) \
- init_timer(timer)
-
-#define lttng_mod_timer_pinned(timer, expires) \
- mod_timer_pinned(timer, expires)
-
-# endif /* LTTNG_RT_VERSION_CODE >= LTTNG_RT_KERNEL_VERSION(4,6,4,8) */
-
-
-#define LTTNG_TIMER_PINNED TIMER_PINNED
-#define LTTNG_TIMER_FUNC_ARG_TYPE unsigned long
-
-/* timer_fieldname is unused prior to 4.15. */
-#define lttng_from_timer(var, timer_data, timer_fieldname) \
- ((typeof(var))timer_data)
-
-static inline void lttng_timer_setup(struct timer_list *timer,
- void (*function)(LTTNG_TIMER_FUNC_ARG_TYPE),
- unsigned int flags, void *data)
-{
- if (flags & LTTNG_TIMER_PINNED)
- lttng_init_timer_pinned(timer);
- else
- init_timer(timer);
-
- timer->function = function;
- timer->data = (unsigned long)data;
-}
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0) */
-
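-/*
- * Setup sketch (hypothetical state structure and callback): code written
- * against these wrappers works unchanged across the 4.15 timer API
- * rework.
- */
-struct lttng_example_state {
-	struct timer_list timer;
-};
-
-static void lttng_example_timer_cb(LTTNG_TIMER_FUNC_ARG_TYPE t)
-{
-	struct lttng_example_state *state = lttng_from_timer(state, t, timer);
-
-	(void) state;	/* ... handle expiry ... */
-}
-
-static inline void lttng_example_start(struct lttng_example_state *state)
-{
-	lttng_timer_setup(&state->timer, lttng_example_timer_cb,
-			LTTNG_TIMER_PINNED, state);
-	lttng_mod_timer_pinned(&state->timer, jiffies + HZ);
-}
-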
-#endif /* _LTTNG_WRAPPER_TIMER_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/trace-clock.h
- *
- * Maps the LTTng trace clock either to a kernel-provided LTTng trace
- * clock or to the mainline monotonic clock. This wrapper depends on
- * CONFIG_HIGH_RES_TIMERS=y.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_TRACE_CLOCK_H
-#define _LTTNG_TRACE_CLOCK_H
-
-#ifdef CONFIG_HAVE_TRACE_CLOCK
-#include <linux/trace-clock.h>
-#else /* CONFIG_HAVE_TRACE_CLOCK */
-
-#include <linux/hardirq.h>
-#include <linux/ktime.h>
-#include <linux/time.h>
-#include <linux/hrtimer.h>
-#include <linux/percpu.h>
-#include <linux/version.h>
-#include <asm/local.h>
-#include <lttng/kernel-version.h>
-#include <lttng/clock.h>
-#include <wrapper/compiler.h>
-#include <wrapper/percpu-defs.h>
-#include <wrapper/random.h>
-#include <blacklist/timekeeping.h>
-
-extern struct lttng_trace_clock *lttng_trace_clock;
-
-/*
- * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
- * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
- * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
- */
-#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
- || LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
- || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
- || LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
-#define LTTNG_CLOCK_NMI_SAFE_BROKEN
-#endif
-
-/*
- * We need clock values to be monotonically increasing per-cpu, which is
- * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
- * straightforward to do on architectures with a 64-bit cmpxchg(), but
- * not so on architectures without 64-bit cmpxchg. For now, only enable
- * this feature on 64-bit architectures.
- */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
- && BITS_PER_LONG == 64 \
- && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
-#define LTTNG_USE_NMI_SAFE_CLOCK
-#endif
-
-#ifdef LTTNG_USE_NMI_SAFE_CLOCK
-
-DECLARE_PER_CPU(u64, lttng_last_tsc);
-
-/*
- * Sometimes called with preemption enabled. Can be interrupted.
- */
-static inline u64 trace_clock_monotonic_wrapper(void)
-{
- u64 now, last, result;
- u64 *last_tsc_ptr;
-
- /* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
- preempt_disable();
- last_tsc_ptr = lttng_this_cpu_ptr(<tng_last_tsc);
- last = *last_tsc_ptr;
- /*
- * Read "last" before "now". It is not strictly required, but it ensures
- * that an interrupt coming in won't artificially trigger a case where
- * "now" < "last". This kind of situation should only happen if the
- * mono_fast time source goes slightly backwards.
- */
- barrier();
- now = ktime_get_mono_fast_ns();
- if (U64_MAX / 2 < now - last)
- now = last;
- result = cmpxchg64_local(last_tsc_ptr, last, now);
- preempt_enable();
- if (result == last) {
- /* Update done. */
- return now;
- } else {
- /*
- * Update not done, due to concurrent update. We can use
- * "result", since it has been sampled concurrently with our
- * time read, so it should not be far from "now".
- */
- return result;
- }
-}
-
-#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
-static inline u64 trace_clock_monotonic_wrapper(void)
-{
- ktime_t ktime;
-
- /*
- * Refuse to trace from NMIs with this wrapper, because an NMI could
- * nest over the xtime write seqlock and deadlock.
- */
- if (in_nmi())
- return (u64) -EIO;
-
- ktime = ktime_get();
- return ktime_to_ns(ktime);
-}
-#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
-
-static inline u64 trace_clock_read64_monotonic(void)
-{
- return (u64) trace_clock_monotonic_wrapper();
-}
-
-static inline u64 trace_clock_freq_monotonic(void)
-{
- return (u64) NSEC_PER_SEC;
-}
-
-static inline int trace_clock_uuid_monotonic(char *uuid)
-{
- return wrapper_get_bootid(uuid);
-}
-
-static inline const char *trace_clock_name_monotonic(void)
-{
- return "monotonic";
-}
-
-static inline const char *trace_clock_description_monotonic(void)
-{
- return "Monotonic Clock";
-}
-
-#ifdef LTTNG_USE_NMI_SAFE_CLOCK
-static inline int get_trace_clock(void)
-{
- printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
- return 0;
-}
-#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
-static inline int get_trace_clock(void)
-{
- printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
- return 0;
-}
-#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
-
-static inline void put_trace_clock(void)
-{
-}
-
-static inline u64 trace_clock_read64(void)
-{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
-
- if (likely(!ltc)) {
- return trace_clock_read64_monotonic();
- } else {
- read_barrier_depends(); /* load ltc before content */
- return ltc->read64();
- }
-}
-
-static inline u64 trace_clock_freq(void)
-{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
-
- if (!ltc) {
- return trace_clock_freq_monotonic();
- } else {
- read_barrier_depends(); /* load ltc before content */
- return ltc->freq();
- }
-}
-
-static inline int trace_clock_uuid(char *uuid)
-{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
-
- read_barrier_depends(); /* load ltc before content */
- /* Use default UUID cb when NULL */
- if (!ltc || !ltc->uuid) {
- return trace_clock_uuid_monotonic(uuid);
- } else {
- return ltc->uuid(uuid);
- }
-}
-
-static inline const char *trace_clock_name(void)
-{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
-
- if (!ltc) {
- return trace_clock_name_monotonic();
- } else {
- read_barrier_depends(); /* load ltc before content */
- return ltc->name();
- }
-}
-
-static inline const char *trace_clock_description(void)
-{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
-
- if (!ltc) {
- return trace_clock_description_monotonic();
- } else {
- read_barrier_depends(); /* load ltc before content */
- return ltc->description();
- }
-}
-
-#endif /* CONFIG_HAVE_TRACE_CLOCK */
-
-#endif /* _LTTNG_TRACE_CLOCK_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/tracepoint.h
- *
- * wrapper around DECLARE_EVENT_CLASS.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_TRACEPOINT_H
-#define _LTTNG_WRAPPER_TRACEPOINT_H
-
-#include <linux/version.h>
-#include <linux/tracepoint.h>
-#include <linux/module.h>
-
-#ifndef HAVE_KABI_2635_TRACEPOINT
-
-#define kabi_2635_tracepoint_probe_register tracepoint_probe_register
-#define kabi_2635_tracepoint_probe_unregister tracepoint_probe_unregister
-
-#endif /* HAVE_KABI_2635_TRACEPOINT */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0))
-
-#include <lttng/tracepoint.h>
-
-#define lttng_wrapper_tracepoint_probe_register lttng_tracepoint_probe_register
-#define lttng_wrapper_tracepoint_probe_unregister lttng_tracepoint_probe_unregister
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)) */
-
-#define lttng_wrapper_tracepoint_probe_register kabi_2635_tracepoint_probe_register
-#define lttng_wrapper_tracepoint_probe_unregister kabi_2635_tracepoint_probe_unregister
-
-static inline
-int lttng_tracepoint_init(void)
-{
- return 0;
-}
-
-static inline
-void lttng_tracepoint_exit(void)
-{
-}
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)) */
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG))
-
-#include <linux/kallsyms.h>
-#include <wrapper/kallsyms.h>
-
-static inline
-int wrapper_tracepoint_module_notify(struct notifier_block *nb,
- unsigned long val, struct module *mod)
-{
- int (*tracepoint_module_notify_sym)(struct notifier_block *nb,
- unsigned long val, struct module *mod);
-
- tracepoint_module_notify_sym =
- (void *) kallsyms_lookup_funcptr("tracepoint_module_notify");
- if (tracepoint_module_notify_sym) {
- return tracepoint_module_notify_sym(nb, val, mod);
- } else {
- printk_once(KERN_WARNING "LTTng: tracepoint_module_notify symbol lookup failed. It probably means you kernel don't need this work-around. Please consider upgrading LTTng modules to make this warning go away.\n");
- return -ENOSYS;
- }
-}
-
-#endif /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG)) */
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG) && defined(MODULE))
-
-static inline
-int wrapper_lttng_fixup_sig(struct module *mod)
-{
- int ret = 0;
-
- /*
-	 * This works around module.c confusing force-loaded modules
-	 * with unsigned modules.
- */
- if (!THIS_MODULE->sig_ok &&
- THIS_MODULE->taints & (1U << TAINT_FORCED_MODULE)) {
- THIS_MODULE->taints &= ~(1U << TAINT_FORCED_MODULE);
- ret = wrapper_tracepoint_module_notify(NULL,
- MODULE_STATE_COMING, mod);
- THIS_MODULE->taints |= (1U << TAINT_FORCED_MODULE);
- }
- return ret;
-}
-
-#else /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG) && defined(MODULE)) */
-
-static inline
-int wrapper_lttng_fixup_sig(struct module *mod)
-{
- return 0;
-}
-
-#endif /* #else #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) && defined(CONFIG_MODULE_SIG) && defined(MODULE)) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
-static inline struct tracepoint *lttng_tracepoint_ptr_deref(tracepoint_ptr_t *p)
-{
- return tracepoint_ptr_deref(p);
-}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) */
-static inline struct tracepoint *lttng_tracepoint_ptr_deref(struct tracepoint * const *p)
-{
- return *p;
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) */
-
-#endif /* _LTTNG_WRAPPER_TRACEPOINT_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/types.h
- *
- * Copyright (C) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_TYPES_H
-#define _LTTNG_WRAPPER_TYPES_H
-
-#define LTTNG_SIZE_MAX (~(size_t)0)
-
-#endif /* _LTTNG_WRAPPER_TYPES_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/uaccess.h
- *
- * wrapper around linux/uaccess.h.
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_UACCESS_H
-#define _LTTNG_WRAPPER_UACCESS_H
-
-#include <linux/uaccess.h>
-#include <lttng/kernel-version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) || \
- LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-#define lttng_access_ok(type, addr, size) access_ok(addr, size)
-
-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) */
-
-#define lttng_access_ok(type, addr, size) access_ok(type, addr, size)
-
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) */
-
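-/*
- * Sketch only: guarding a user-space read with the wrapper keeps a
- * single call site valid before and after the 5.0 access_ok()
- * signature change.
- */
-static inline int lttng_example_ok_to_read(const char __user *uaddr,
-		size_t len)
-{
-	return lttng_access_ok(VERIFY_READ, uaddr, len);
-}
-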
-#endif /* _LTTNG_WRAPPER_UACCESS_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/uprobes.h
- *
- * wrapper around uprobes. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
- * Copyright (C) 2013 Yannick Brosseau <yannick.brosseau@gmail.com>
- * Copyright (C) 2017 Francis Deslauriers <francis.deslauriers@efficios.com>
- *
- */
-
-#ifndef _LTTNG_WRAPPER_UPROBES_H
-#define _LTTNG_WRAPPER_UPROBES_H
-
-#include <linux/version.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
-#include <linux/uprobes.h>
-
-/* Use kallsyms lookup for versions before 3.9. */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-
-static inline
-int wrapper_uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- return uprobe_register(inode, offset, uc);
-}
-
-static inline
-void wrapper_uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- uprobe_unregister(inode, offset, uc);
-}
-
-#else /* Version < 3.9, use kallsyms lookup. */
-#include "kallsyms.h"
-
-static inline
-int wrapper_uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- int (*uprobe_register_sym)(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
-
- uprobe_register_sym = (void *) kallsyms_lookup_funcptr("uprobe_register");
-
- if (uprobe_register_sym) {
- return uprobe_register_sym(inode, offset, uc);
- } else {
- printk(KERN_WARNING "LTTng: uprobe_register symbol lookup failed.\n");
- return -EINVAL;
- }
-}
-
-static inline
-void wrapper_uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- int (*uprobe_unregister_sym)(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
-
- uprobe_unregister_sym = (void *) kallsyms_lookup_funcptr("uprobe_unregister");
-
- if (uprobe_unregister_sym) {
- uprobe_unregister_sym(inode, offset, uc);
- } else {
- printk(KERN_WARNING "LTTng: uprobe_unregister symbol lookup failed.\n");
- WARN_ON(1);
- }
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */
-#else
-/* Version < 3.5, before uprobes were added. */
-struct uprobe_consumer {};
-
-#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) */
-#endif /* _LTTNG_WRAPPER_UPROBES_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/user_namespace.h
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- */
-
-#ifndef _LTTNG_WRAPPER_USER_NAMESPACE_H
-#define _LTTNG_WRAPPER_USER_NAMESPACE_H
-
-#include <linux/version.h>
-#include <linux/user_namespace.h>
-
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
-
-#define lttng_current_xxuid(xxx) \
- (from_kuid_munged(&init_user_ns, current_##xxx()))
-
-#define lttng_current_vxxuid(xxx) \
- (from_kuid_munged(current_user_ns(), current_##xxx()))
-
-#define lttng_current_xxgid(xxx) \
- (from_kgid_munged(&init_user_ns, current_##xxx()))
-
-#define lttng_current_vxxgid(xxx) \
- (from_kgid_munged(current_user_ns(), current_##xxx()))
-
-static inline
-uid_t lttng_task_vuid(struct task_struct *p, struct user_namespace *ns)
-{
- uid_t uid;
- kuid_t kuid;
-
- kuid = task_cred_xxx(p, uid);
- uid = from_kuid_munged(ns, kuid);
-
- return uid;
-}
-
-static inline
-gid_t lttng_task_vgid(struct task_struct *p, struct user_namespace *ns)
-{
- gid_t gid;
- kgid_t kgid;
-
- kgid = task_cred_xxx(p, gid);
- gid = from_kgid_munged(ns, kgid);
-
- return gid;
-}
-
-#else
-
-#define lttng_current_xxuid(xxx) (current_##xxx())
-
-#define lttng_current_vxxuid(xxx) \
- (user_ns_map_uid(current_user_ns(), current_cred(), current_##xxx()))
-
-#define lttng_current_xxgid(xxx) (current_##xxx())
-
-#define lttng_current_vxxgid(xxx) \
- (user_ns_map_gid(current_user_ns(), current_cred(), current_##xxx()))
-
-static inline
-uid_t lttng_task_vuid(struct task_struct *p, struct user_namespace *ns)
-{
- uid_t uid;
-
- /*
- * __task_cred requires the RCU readlock be held
- */
- rcu_read_lock();
- uid = user_ns_map_uid(ns, __task_cred(p), __task_cred(p)->uid);
- rcu_read_unlock();
-
- return uid;
-}
-
-static inline
-gid_t lttng_task_vgid(struct task_struct *p, struct user_namespace *ns)
-{
- gid_t gid;
-
- /*
- * __task_cred requires the RCU readlock be held
- */
- rcu_read_lock();
- gid = user_ns_map_gid(ns, __task_cred(p), __task_cred(p)->gid);
- rcu_read_unlock();
-
- return gid;
-}
-
-#endif
-
-#define lttng_current_uid() (lttng_current_xxuid(uid))
-#define lttng_current_euid() (lttng_current_xxuid(euid))
-#define lttng_current_suid() (lttng_current_xxuid(suid))
-#define lttng_current_fsuid() (lttng_current_xxuid(fsuid))
-#define lttng_current_gid() (lttng_current_xxgid(gid))
-#define lttng_current_egid() (lttng_current_xxgid(egid))
-#define lttng_current_sgid() (lttng_current_xxgid(sgid))
-#define lttng_current_fsgid() (lttng_current_xxgid(fsgid))
-
-#define lttng_current_vuid() (lttng_current_vxxuid(uid))
-#define lttng_current_veuid() (lttng_current_vxxuid(euid))
-#define lttng_current_vsuid() (lttng_current_vxxuid(suid))
-#define lttng_current_vfsuid() (lttng_current_vxxuid(fsuid))
-#define lttng_current_vgid() (lttng_current_vxxgid(gid))
-#define lttng_current_vegid() (lttng_current_vxxgid(egid))
-#define lttng_current_vsgid() (lttng_current_vxxgid(sgid))
-#define lttng_current_vfsgid() (lttng_current_vxxgid(fsgid))
-
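-/*
- * Usage sketch (hypothetical event fields): recording a uid both as
- * seen from the initial user namespace and as seen from the current
- * one, using the accessors above.
- */
-static inline void lttng_example_record_uids(uid_t *uid, uid_t *vuid)
-{
-	*uid = lttng_current_uid();
-	*vuid = lttng_current_vuid();
-}
-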
-#endif /* _LTTNG_WRAPPER_USER_NAMESPACE_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/uuid.h
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_UUID_H
-#define _LTTNG_WRAPPER_UUID_H
-
-#include <linux/version.h>
-#include <linux/uuid.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
-static inline
-void lttng_guid_gen(guid_t *u)
-{
- return guid_gen(u);
-}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-typedef uuid_le guid_t;
-
-static inline
-void lttng_guid_gen(guid_t *u)
-{
- return uuid_le_gen(u);
-}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-#endif /* _LTTNG_WRAPPER_UUID_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/vmalloc.h
- *
- * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_VMALLOC_H
-#define _LTTNG_WRAPPER_VMALLOC_H
-
-#include <linux/version.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-#ifdef CONFIG_KALLSYMS
-
-#include <linux/kallsyms.h>
-#include <wrapper/kallsyms.h>
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
-
-static inline
-void wrapper_vmalloc_sync_mappings(void)
-{
- void (*vmalloc_sync_mappings_sym)(void);
-
- vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
- if (vmalloc_sync_mappings_sym) {
- vmalloc_sync_mappings_sym();
- } else {
-#ifdef CONFIG_X86
- /*
- * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
- * trigger recursive page faults.
- */
- printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
- printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
-#endif
- }
-}
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-/*
- * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.7.
- */
-static inline
-void wrapper_vmalloc_sync_mappings(void)
-{
- void (*vmalloc_sync_all_sym)(void);
-
- vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
- if (vmalloc_sync_all_sym) {
- vmalloc_sync_all_sym();
- } else {
-#ifdef CONFIG_X86
- /*
- * Only x86 needs vmalloc_sync_all to make sure LTTng does not
- * trigger recursive page faults.
- */
- printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
- printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
-#endif
- }
-}
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-#else
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
-
-static inline
-void wrapper_vmalloc_sync_mappings(void)
-{
- return vmalloc_sync_mappings();
-}
-
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-static inline
-void wrapper_vmalloc_sync_mappings(void)
-{
- return vmalloc_sync_all();
-}
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
-
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
-static inline
-void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
-{
- void *ret;
-
- ret = kvmalloc_node(size, flags, node);
- if (is_vmalloc_addr(ret)) {
- /*
- * Make sure we don't trigger recursive page faults in the
- * tracing fast path.
- */
- wrapper_vmalloc_sync_mappings();
- }
- return ret;
-}
-
-static inline
-void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
-{
- return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-
-static inline
-void *lttng_kvmalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void *lttng_kvzalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void lttng_kvfree(const void *addr)
-{
- kvfree(addr);
-}
-
-#else
-
-#include <linux/slab.h>
-
-static inline
-void print_vmalloc_node_range_warning(void)
-{
- printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
- printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
- printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
-}
-
-/*
- * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc().
- */
-static inline
-void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller)
-{
-#ifdef CONFIG_KALLSYMS
- /*
-	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
- */
- void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
-
- lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
- if (lttng__vmalloc_node_range)
- return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
- vm_flags, node, caller);
-#endif
- if (node != NUMA_NO_NODE)
- print_vmalloc_node_range_warning();
- return __vmalloc(size, gfp_mask, prot);
-}
-
-/**
- * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
- * failure, fall back to non-contiguous (vmalloc) allocation.
- * @size: size of the request.
- * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
- *
- * Uses kmalloc to get the memory but if the allocation fails then falls back
- * to the vmalloc allocator. Use lttng_kvfree to free the memory.
- *
- * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported
- */
-static inline
-void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
-{
- void *ret;
-
- /*
-	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables),
- * so the given set of flags has to be compatible.
- */
- WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
-
- /*
- * If the allocation fits in a single page, do not fallback.
- */
- if (size <= PAGE_SIZE) {
- return kmalloc_node(size, flags, node);
- }
-
- /*
- * Make sure that larger requests are not too disruptive - no OOM
- * killer and no allocation failure warnings as we have a fallback
- */
- ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
- if (!ret) {
- ret = __lttng_vmalloc_node_range(size, 1,
- VMALLOC_START, VMALLOC_END,
- flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
- node, __builtin_return_address(0));
- /*
- * Make sure we don't trigger recursive page faults in the
- * tracing fast path.
- */
- wrapper_vmalloc_sync_mappings();
- }
- return ret;
-}
-
-static inline
-void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
-{
- return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-
-static inline
-void *lttng_kvmalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void *lttng_kvzalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void lttng_kvfree(const void *addr)
-{
- if (is_vmalloc_addr(addr)) {
- vfree(addr);
- } else {
- kfree(addr);
- }
-}
-#endif
-
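-/*
- * Allocation sketch (hypothetical table): lttng_kvzalloc() tries kmalloc
- * first and transparently falls back to vmalloc, syncing kernel mappings
- * so the tracer fast path cannot take a recursive page fault. Release
- * with lttng_kvfree().
- */
-static inline void *lttng_example_alloc_table(unsigned long nr_entries)
-{
-	return lttng_kvzalloc(nr_entries * sizeof(void *), GFP_KERNEL);
-}
-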
-#endif /* _LTTNG_WRAPPER_VMALLOC_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/writeback.h
- *
- * wrapper around global_dirty_limit read. Using KALLSYMS with KALLSYMS_ALL
- * to get its address when available, else we need to have a kernel that
- * exports this variable to GPL modules.
- *
- * Copyright (C) 2013 Mentor Graphics Corp.
- */
-
-#ifndef _LTTNG_WRAPPER_WRITEBACK_H
-#define _LTTNG_WRAPPER_WRITEBACK_H
-
-#include <lttng/kernel-version.h>
-
-#ifdef CONFIG_KALLSYMS_ALL
-#include <linux/kallsyms.h>
-#include <wrapper/kallsyms.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)
-
-static struct wb_domain *global_wb_domain_sym;
-
-static inline
-unsigned long wrapper_global_dirty_limit(void)
-{
- if (!global_wb_domain_sym)
- global_wb_domain_sym =
- (void *) kallsyms_lookup_dataptr("global_wb_domain");
- if (global_wb_domain_sym) {
- return global_wb_domain_sym->dirty_limit;
- } else {
- printk_once(KERN_WARNING "LTTng: global_wb_domain symbol lookup failed.\n");
- return 0;
- }
-}
-#else
-
-static unsigned long *global_dirty_limit_sym;
-
-static inline
-unsigned long wrapper_global_dirty_limit(void)
-{
- if (!global_dirty_limit_sym)
- global_dirty_limit_sym =
- (void *) kallsyms_lookup_dataptr("global_dirty_limit");
- if (global_dirty_limit_sym) {
- return *global_dirty_limit_sym;
- } else {
- printk_once(KERN_WARNING "LTTng: global_dirty_limit symbol lookup failed.\n");
- return 0;
- }
-}
-#endif
-
-#else /* CONFIG_KALLSYMS_ALL */
-
-#include <linux/writeback.h>
-
-static inline
-unsigned long wrapper_global_dirty_limit(void)
-{
- return global_dirty_limit;
-}
-
-#endif
-
-#endif /* _LTTNG_WRAPPER_WRITEBACK_H */