/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all. Uses KALLSYMS to get its address when
 * available; otherwise the kernel must export this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng-kernel-version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0))
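
/*
 * The LTTNG_KERNEL_RANGE entries above list the stable branches that
 * received the 5.6 vmalloc_sync_mappings rename as a backport.
 */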

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* #ifdef CONFIG_KALLSYMS */

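/*
 * Without CONFIG_KALLSYMS, call the kernel function directly. As the
 * header comment notes, this requires a kernel that exports the function
 * to GPL modules.
 */
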
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* #ifdef CONFIG_KALLSYMS */
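
/*
 * Usage sketch (illustrative only, not part of the original header): call
 * wrapper_vmalloc_sync_mappings() right after a vmalloc-backed allocation
 * that the tracing fast path (page fault handler, NMI handlers) may touch:
 *
 *	buf = vmalloc(size);
 *	if (buf)
 *		wrapper_vmalloc_sync_mappings();
 */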

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
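
/*
 * Kernels >= 4.12 provide kvmalloc_node() (kmalloc with a vmalloc
 * fallback); the wrappers below only add the vmalloc mapping sync on top.
 */
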
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}
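
/*
 * Example use of the kv* wrappers (illustrative sketch; "struct foo" is a
 * placeholder type, not part of this header):
 *
 *	struct foo *f = lttng_kvzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(f);
 */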

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}
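
/*
 * Note: the three-argument __vmalloc(size, gfp_mask, prot) call is the
 * pre-5.8 signature; it is always available on the < 4.12 kernels this
 * #else branch is compiled for.
 */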

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: numa node to allocate from.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables),
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#endif /* _LTTNG_WRAPPER_VMALLOC_H */