/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_mappings() and vmalloc_sync_all(). Uses
 * KALLSYMS to get their addresses when available; otherwise the kernel must
 * export these functions to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng-kernel-version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8: the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

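/*
 * Usage sketch (illustrative only; 'buf' and 'bufsize' are hypothetical
 * names, not from this header): a caller that vmalloc()s tracer memory is
 * expected to call the wrapper before the buffer can be touched from
 * page-fault or NMI context:
 *
 *	buf = vmalloc(bufsize);
 *	if (!buf)
 *		return -ENOMEM;
 *	wrapper_vmalloc_sync_mappings();
 *
 * On x86 this propagates the vmalloc area's top-level page-table entries to
 * all page tables, so the tracing fast path cannot fault recursively.
 */
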
/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

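/*
 * The canary is never called at run time. Being a static inline that calls
 * the declaration from <linux/vmalloc.h>, it turns any future change to the
 * function's prototype into a compile-time error, signalling that the
 * kallsyms-based wrapper above needs updating.
 */
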
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8: the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

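/*
 * Usage sketch (illustrative only; 'struct foo' and 'nr' are hypothetical
 * names, not from this header): the lttng_kv*() helpers mirror the kernel's
 * kvmalloc() family:
 *
 *	struct foo *tbl;
 *
 *	tbl = lttng_kvzalloc(nr * sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(tbl);
 *
 * lttng_kvfree() works whether the memory came from kmalloc or vmalloc, and
 * vmalloc-backed buffers are synchronized for the tracing fast path before
 * being returned.
 */
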
#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range() which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *	unsigned long start, unsigned long end, gfp_t gfp_mask,
 *	pgprot_t prot, unsigned long vm_flags, int node,
 *	const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range() which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *	unsigned long start, unsigned long end, gfp_t gfp_mask,
 *	pgprot_t prot, int node, const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range() which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *	unsigned long start, unsigned long end, gfp_t gfp_mask,
 *	pgprot_t prot, int node, void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) */

/*
 * Basic fallback for kernels prior to 2.6.38 without __vmalloc_node_range().
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		void *caller)
{
	return __vmalloc(size, gfp_mask, prot);
}

#endif

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from (or NUMA_NO_NODE).
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

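/*
 * Flag-compatibility sketch (illustrative only; 'p' and 'sz' are
 * hypothetical names): the vmalloc fallback allocates page tables with
 * GFP_KERNEL internally, so atomic contexts cannot use this helper:
 *
 *	p = lttng_kvmalloc(sz, GFP_KERNEL);			// OK
 *	p = lttng_kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);	// OK
 *	p = lttng_kvmalloc(sz, GFP_ATOMIC);	// triggers the WARN_ON_ONCE() above
 *
 * Callers that need atomic allocations should use kmalloc() directly.
 */
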
static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif

#endif /* _LTTNG_WRAPPER_VMALLOC_H */