/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_mappings() / vmalloc_sync_all(). Uses KALLSYMS
 * to look up the symbol's address when available; otherwise the kernel must
 * export the function to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

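/*
 * This header provides two wrapper families used by the tracer:
 *  - wrapper_vmalloc_sync_mappings(): invokes vmalloc_sync_mappings()
 *    (or vmalloc_sync_all() on kernels before 5.6), through a KALLSYMS
 *    lookup when the symbol is not exported.
 *  - lttng_kvmalloc_node()/lttng_kvzalloc()/lttng_kvfree(): kmalloc
 *    allocators with a vmalloc fallback that also synchronize vmalloc
 *    mappings to avoid recursive page faults in the tracing fast path.
 */
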
#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map wrapper_vmalloc_sync_mappings() to vmalloc_sync_all() on kernels
 * before 5.6, which do not have vmalloc_sync_mappings().
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* #ifdef CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* #else #ifdef CONFIG_KALLSYMS */

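/*
 * Usage sketch (hypothetical caller, for illustration only; "buf" and "len"
 * are made-up names): code that vmallocs tracer memory is expected to call
 * wrapper_vmalloc_sync_mappings() right after allocating, so the kernel page
 * tables are synchronized before the memory can be touched from page-fault
 * or NMI context:
 *
 *	buf = vmalloc(len);
 *	if (buf)
 *		wrapper_vmalloc_sync_mappings();
 *
 * lttng_kvmalloc_node() below applies the same pattern after its vmalloc
 * fallback.
 */
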
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range, with a fallback to __vmalloc()
 * when the symbol cannot be looked up.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range(), which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	/*
	 * Fallback when the lookup fails (or without KALLSYMS): __vmalloc()
	 * ignores the range, alignment and NUMA node arguments, so warn once
	 * when a specific node was requested.
	 */
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from.
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL - are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables),
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */
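
/*
 * Illustrative usage (hypothetical caller; "tbl" and "nr_entries" are made-up
 * names, not part of this header): allocate with lttng_kvzalloc() or
 * lttng_kvmalloc_node() and release with lttng_kvfree(), which dispatches to
 * vfree() or kfree() as appropriate:
 *
 *	tbl = lttng_kvzalloc(nr_entries * sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(tbl);
 */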

#endif /* _LTTNG_WRAPPER_VMALLOC_H */