/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
 * available, else we need to have a kernel that exports this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <lttng/kernel-version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))

/*
 * wrapper_vmalloc_sync_mappings was removed in v5.8, the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(4,18,0,240,0,0, 4,19,0,0,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "LTTng: Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}
/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */
/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "LTTng: Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))

/*
 * wrapper_vmalloc_sync_mappings was removed in v5.8, the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(4,18,0,240,0,0, 4,19,0,0,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>
static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "LTTng: Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "LTTng: Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0))

/*
 * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get '__vmalloc_node_range' which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, unsigned long vm_flags, int node,
 *		const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}
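
/*
 * Note on the canary (an explanatory sketch, not part of the tracer fast
 * path): the function above calls the real '__vmalloc_node_range()' with the
 * exact argument list the kallsyms-resolved function pointer expects. If a
 * future kernel changed the prototype, for example to the pre-4.0 form:
 *
 *	void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *			unsigned long start, unsigned long end, gfp_t gfp_mask,
 *			pgprot_t prot, int node, const void *caller);
 *
 * the call in the canary would no longer match the declaration in
 * <linux/vmalloc.h> and the build would fail, flagging this wrapper for an
 * update.
 */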
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0))

/*
 * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get '__vmalloc_node_range' which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, int node, const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}
#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */

/*
 * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get '__vmalloc_node_range' which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, int node, void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#endif
/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}

#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0)) */
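
/*
 * Example usage of the allocation wrappers (an illustrative sketch; the
 * 'lttng_example_buffer' structure and the error handling around it are
 * hypothetical, not part of this header):
 *
 *	struct lttng_example_buffer *buf;
 *
 *	buf = lttng_kvzalloc(sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(buf);
 *
 * Large requests may be vmalloc-backed, so memory obtained from
 * lttng_kvmalloc() or lttng_kvzalloc() must always be released with
 * lttng_kvfree(), never with plain kfree().
 */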
#endif /* _LTTNG_WRAPPER_VMALLOC_H */