#include <linux/vmalloc.h>
#include <wrapper/mm.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
* If kmalloc ever uses vmalloc underneath, make sure the buffer pages
* will not fault.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
wrapper_clear_current_oom_origin();
vfree(pages);
return 0;
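
/*
 * A minimal sketch (not from the patch) of the pattern the comment above
 * relies on: allocate with vmalloc, synchronize the kernel mappings, then
 * hand the memory to code that must never fault. example_alloc_fault_free()
 * is a hypothetical name; vmalloc() and wrapper_vmalloc_sync_mappings() are
 * the real calls used throughout this patch.
 */
#include <linux/vmalloc.h>
#include <wrapper/vmalloc.h>

static void *example_alloc_fault_free(unsigned long size)
{
	void *buf = vmalloc(size);

	if (!buf)
		return NULL;
	/*
	 * Populate the kernel page tables for the new mapping now, so a
	 * later access from NMI or page-fault context cannot itself fault.
	 */
	wrapper_vmalloc_sync_mappings();
	return buf;
}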
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/ringbuffer/vfs.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
{
int ret = 0;
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
lttng_clock_ref();
ret = lttng_tp_mempool_init();
field->record = lttng_callstack_record;
field->priv = fdata;
field->destroy = lttng_callstack_destroy;
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
error_create:
field->record = cgroup_ns_record;
field->get_value = cgroup_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_cgroup_ns_to_ctx);
field->record = cpu_id_record;
field->get_value = cpu_id_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_cpu_id_to_ctx);
field->record = egid_record;
field->get_value = egid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_egid_to_ctx);
field->record = euid_record;
field->get_value = euid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_euid_to_ctx);
field->record = gid_record;
field->get_value = gid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_gid_to_ctx);
field->record = hostname_record;
field->get_value = hostname_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_hostname_to_ctx);
field->record = interruptible_record;
field->get_value = interruptible_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_interruptible_to_ctx);
field->record = ipc_ns_record;
field->get_value = ipc_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_ipc_ns_to_ctx);
field->record = migratable_record;
field->get_value = migratable_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_migratable_to_ctx);
field->record = mnt_ns_record;
field->get_value = mnt_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_mnt_ns_to_ctx);
field->record = need_reschedule_record;
field->get_value = need_reschedule_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_need_reschedule_to_ctx);
field->record = net_ns_record;
field->get_value = net_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_net_ns_to_ctx);
field->record = nice_record;
field->get_value = nice_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
field->u.perf_counter = perf_field;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
field->record = pid_ns_record;
field->get_value = pid_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_pid_ns_to_ctx);
field->record = pid_record;
field->get_value = pid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
field->record = ppid_record;
field->get_value = ppid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
field->record = preemptible_record;
field->get_value = preemptible_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_preemptible_to_ctx);
field->record = prio_record;
field->get_value = prio_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
field->record = procname_record;
field->get_value = procname_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
field->record = sgid_record;
field->get_value = sgid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_sgid_to_ctx);
field->record = suid_record;
field->get_value = suid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_suid_to_ctx);
field->record = tid_record;
field->get_value = tid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
field->record = uid_record;
field->get_value = uid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_uid_to_ctx);
field->record = user_ns_record;
field->get_value = user_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_user_ns_to_ctx);
field->record = uts_ns_record;
field->get_value = uts_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_uts_ns_to_ctx);
field->record = vegid_record;
field->get_value = vegid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vegid_to_ctx);
field->record = veuid_record;
field->get_value = veuid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_veuid_to_ctx);
field->record = vgid_record;
field->get_value = vgid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vgid_to_ctx);
field->record = vpid_record;
field->get_value = vpid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
field->record = vppid_record;
field->get_value = vppid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
field->record = vsgid_record;
field->get_value = vsgid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vsgid_to_ctx);
field->record = vsuid_record;
field->get_value = vsuid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vsuid_to_ctx);
field->record = vtid_record;
field->get_value = vtid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
field->record = vuid_record;
field->get_value = vuid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vuid_to_ctx);
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <linux/dmi.h>
#include <wrapper/uuid.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/random.h>
#include <wrapper/tracepoint.h>
#include <wrapper/list.h>
* Registers a transport which can be used as output to extract the data out of
* LTTng. The module calling this registration function must ensure that no
* trap-inducing code will be executed by the transport functions. E.g.
- * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
+ * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
* is made visible to the transport function. This registration acts as a
- * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
+ * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
* after its registration must it synchronize the TLBs.
*/
void lttng_transport_register(struct lttng_transport *transport)
/*
* Make sure no page fault can be triggered by the module about to be
* registered. We deal with this here so we don't have to call
- * vmalloc_sync_all() in each module's init.
+ * vmalloc_sync_mappings() in each module's init.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
mutex_lock(&sessions_mutex);
list_add_tail(&transport->node, &lttng_transport_list);
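
/*
 * A sketch of the registration contract documented above, using hypothetical
 * names (my_transport, my_late_buf, my_transport_init): registration itself
 * acts as a vmalloc_sync_mappings, so an explicit sync is only required for
 * virtual memory the transport allocates after registering.
 */
#include <linux/vmalloc.h>
#include <lttng-events.h>
#include <wrapper/vmalloc.h>

static struct lttng_transport my_transport;	/* hypothetical transport */
static void *my_late_buf;

static int my_transport_init(void)
{
	lttng_transport_register(&my_transport);	/* implies a sync */

	/* Memory vmalloc'd after registration must be synced explicitly. */
	my_late_buf = vmalloc(PAGE_SIZE);
	if (!my_late_buf)
		return -ENOMEM;
	wrapper_vmalloc_sync_mappings();
	return 0;
}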
#include <linux/module.h>
#include <linux/types.h>
#include <lib/bitfield.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/trace-clock.h>
#include <lttng-events.h>
#include <lttng-tracer.h>
- * This vmalloc sync all also takes care of the lib ring buffer
+ * This vmalloc sync mappings also takes care of the lib ring buffer
* vmalloc'd module pages when it is built as a module into LTTng.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
lttng_transport_register(&lttng_relay_transport);
return 0;
}
#include <linux/module.h>
#include <linux/types.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <lttng-events.h>
#include <lttng-tracer.h>
- * This vmalloc sync all also takes care of the lib ring buffer
+ * This vmalloc sync mappings also takes care of the lib ring buffer
* vmalloc'd module pages when it is built as a module into LTTng.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
lttng_transport_register(&lttng_relay_transport);
return 0;
}
struct lttng_kernel_event ev;
int ret;
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
if (!chan->sc_table) {
/* create syscall table mapping syscall to events */
* Well.. kprobes itself puts the page fault handler on the blacklist,
* but we can never be too careful.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
ret = register_kprobe(&event->u.kprobe.kp);
if (ret)
* Well.. kprobes itself puts the page fault handler on the blacklist,
* but we can never be too careful.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
ret = register_kretprobe(&lttng_krp->krp);
if (ret)
#include <probes/lttng.h>
#include <probes/lttng-types.h>
#include <probes/lttng-probe-user.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/rcu.h>
#ifndef TP_MODULE_NOINIT
static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}
}
- /* Ensure the memory we just allocated don't trigger page faults. */
+ /* Ensure the memory we just allocated doesn't trigger page faults. */
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
uprobe_handler->event = event;
uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
{
int ret = 0;
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
/* /dev/lttng-logger */
ret = misc_register(&logger_dev);
int ret = 0;
(void) wrapper_lttng_fixup_sig(THIS_MODULE);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
lttng_test_filter_event_dentry =
proc_create_data(LTTNG_TEST_FILTER_EVENT_FILE,
S_IRUGO | S_IWUGO, NULL,
#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+ void (*vmalloc_sync_mappings_sym)(void);
+
+ vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
+ if (vmalloc_sync_mappings_sym) {
+ vmalloc_sync_mappings_sym();
+ } else {
+#ifdef CONFIG_X86
+ /*
+ * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
+ * trigger recursive page faults.
+ */
+ printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
+ printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
+#endif
+ }
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+/*
+ * Map vmalloc_sync_mappings() to vmalloc_sync_all() on kernels before 5.7.
+ */
static inline
-void wrapper_vmalloc_sync_all(void)
+void wrapper_vmalloc_sync_mappings(void)
{
void (*vmalloc_sync_all_sym)(void);

vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
if (vmalloc_sync_all_sym) {
vmalloc_sync_all_sym();
} else {
#ifdef CONFIG_X86
/*
* Only x86 needs vmalloc_sync_all to make sure LTTng does not
* trigger recursive page faults.
*/
printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
}
}
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+ return vmalloc_sync_mappings();
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
static inline
-void wrapper_vmalloc_sync_all(void)
+void wrapper_vmalloc_sync_mappings(void)
{
return vmalloc_sync_all();
}
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
* Make sure we don't trigger recursive page faults in the
* tracing fast path.
*/
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
}
return ret;
}
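
/*
 * A complete minimal module skeleton (all names hypothetical) tying the
 * hunks above together: sync the vmalloc mappings once at init, before any
 * probe or transport registration, so nothing vmalloc'd at load time can
 * fault later in the tracing fast path.
 */
#include <linux/module.h>
#include <wrapper/vmalloc.h>

static int __init my_tracer_init(void)
{
	wrapper_vmalloc_sync_mappings();
	/* ... register probes / transports / contexts here ... */
	return 0;
}
module_init(my_tracer_init);

static void __exit my_tracer_exit(void)
{
	/* ... unregister here ... */
}
module_exit(my_tracer_exit);

MODULE_LICENSE("GPL");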