/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all(). Uses KALLSYMS to get its address when
 * available; otherwise we need a kernel that exports this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>	/* is_vmalloc_addr() */

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.7.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */

#else /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */

#endif /* CONFIG_KALLSYMS */
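
/*
 * Usage sketch (illustrative, not from the original header): a caller that
 * vmallocs memory later touched from the tracing fast path (e.g. from a
 * page fault or NMI handler) would synchronize the kernel mappings right
 * after allocating:
 *
 *	buf = vmalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	wrapper_vmalloc_sync_mappings();
 *
 * "buf" and "size" are hypothetical locals.
 */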

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))

static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}
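
/*
 * Usage sketch (illustrative, not from the original header): allocations
 * made with the lttng_kv* helpers must be released with lttng_kvfree(),
 * since the memory may come from either the slab or the vmalloc allocator:
 *
 *	struct foo *f;
 *
 *	f = lttng_kvzalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(f);
 *
 * "struct foo" is a placeholder type.
 */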

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc().
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range() which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}
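
/*
 * Usage sketch (illustrative, not from the original header): this wrapper is
 * meant to be called the way lttng_kvmalloc_node() does below, e.g.:
 *
 *	ret = __lttng_vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 0,
 *			node, __builtin_return_address(0));
 *
 * Without KALLSYMS, the node hint is silently ignored by the __vmalloc()
 * fallback, hence the NUMA performance warning above.
 */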

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
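
/*
 * Usage sketch (illustrative, not from the original header): a NUMA-aware
 * caller could request zeroed memory near a given node; freeing goes through
 * lttng_kvfree(), which picks vfree() or kfree() as shown above:
 *
 *	void *p = lttng_kvzalloc_node(count * sizeof(u64), GFP_KERNEL, node);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(p);
 *
 * "count" and "node" are hypothetical parameters.
 */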

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#endif /* _LTTNG_WRAPPER_VMALLOC_H */