/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
 * available, else we need to have a kernel that exports this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng-kernel-version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * wrapper_vmalloc_sync_mappings was removed in v5.8; the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}
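
/*
 * Illustrative sketch (not part of the upstream header): because the
 * v5.8+ variant compiles to nothing, callsites can invoke the wrapper
 * unconditionally, with no kernel-version checks of their own:
 *
 *	buf = vmalloc(size);
 *	if (buf)
 *		wrapper_vmalloc_sync_mappings();	(no-op on v5.8+)
 */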
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.7.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,18,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */
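
/*
 * Usage sketch (illustrative, not part of the upstream header): a
 * tracer allocating a buffer with vmalloc() calls the wrapper right
 * after the allocation, so that the kernel page tables are synchronized
 * before the buffer is touched from the page fault handler or from NMI
 * context, where a recursive fault would be fatal:
 *
 *	buf = vmalloc(nr_pages << PAGE_SHIFT);
 *	if (!buf)
 *		return -ENOMEM;
 *	wrapper_vmalloc_sync_mappings();
 */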
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))

static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}
static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}
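
/*
 * Usage sketch (illustrative; "struct lttng_foo" and "count" are
 * hypothetical caller-side names): the lttng_kv* helpers mirror the
 * kernel's kvmalloc() family, so callers need not know whether the
 * memory came from kmalloc or vmalloc, as long as it is released with
 * lttng_kvfree():
 *
 *	struct lttng_foo *tbl;
 *
 *	tbl = lttng_kvzalloc(count * sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(tbl);
 */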
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}
/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}
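
/*
 * Design note (sketch): __vmalloc_node_range() is not exported to GPL
 * modules, so the kallsyms lookup above is the only way to honor the
 * @node hint on these older kernels. Without CONFIG_KALLSYMS the call
 * degrades to plain __vmalloc(), which ignores @node; for example:
 *
 *	p = __lttng_vmalloc_node_range(sz, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 0,
 *			1, __builtin_return_address(0));
 *
 * returns node-1-local pages when the lookup succeeds, and pages from
 * any node (plus the printk_once warning) when it does not.
 */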
/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 *
 * Uses kmalloc to get the memory, but if the allocation fails, falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL - are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables),
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}
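
/*
 * Note on the GFP contract above (sketch): the WARN_ON_ONCE rejects
 * flag sets that cannot sleep, because the vmalloc fallback allocates
 * its page tables with GFP_KERNEL internally:
 *
 *	lttng_kvmalloc(sz, GFP_KERNEL);		(OK)
 *	lttng_kvmalloc(sz, GFP_ATOMIC);		(triggers the warning)
 */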
static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}
static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */
#endif /* _LTTNG_WRAPPER_VMALLOC_H */