#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

/*
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all. Uses KALLSYMS to get its address when
 * available; otherwise we need a kernel that exports this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
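
/*
 * wrapper_vmalloc_sync_all() comes in two build-time variants: with
 * CONFIG_KALLSYMS, the non-exported vmalloc_sync_all symbol is resolved
 * at runtime through kallsyms; without it, the wrapper calls the function
 * directly and therefore needs a kernel that exports it to GPL modules.
 */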
#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>

static inline
void wrapper_vmalloc_sync_all(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}
#else

static inline
void wrapper_vmalloc_sync_all(void)
{
	return vmalloc_sync_all();
}
#endif

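/*
 * Kernels 4.12 and later provide kvmalloc_node()/kvfree(); the lttng_kv*
 * helpers in the first branch below simply wrap them, adding a
 * vmalloc_sync_all() when the allocation went through vmalloc. Older
 * kernels get an equivalent open-coded implementation in the #else branch.
 */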
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_all();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else

#include <linux/slab.h>

/*
 * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
 */
static inline
void *__lttng_vmalloc_node_fallback(unsigned long size, unsigned long align,
		gfp_t gfp_mask, pgprot_t prot, int node, void *caller)
{
	void *ret;

#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node, which is not exported.
	 * Fall back to kmalloc_node if the lookup fails.
	 */
	void *(*lttng__vmalloc_node)(unsigned long size, unsigned long align,
			gfp_t gfp_mask, pgprot_t prot, int node, void *caller);

	lttng__vmalloc_node = (void *) kallsyms_lookup_funcptr("__vmalloc_node");
	if (lttng__vmalloc_node)
		ret = lttng__vmalloc_node(size, align, gfp_mask, prot, node, caller);
	else
		ret = kmalloc_node(size, gfp_mask, node);
#else
	/*
	 * If we don't have KALLSYMS, fall back to kmalloc_node.
	 */
	ret = kmalloc_node(size, gfp_mask, node);
#endif

	return ret;
}
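
/*
 * Note: the kmalloc_node() fallback above returns physically contiguous
 * memory, so large allocations are more likely to fail there than with
 * __vmalloc_node(); callers get NULL on failure in both cases.
 */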

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from.
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables),
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		if (node == NUMA_NO_NODE) {
			/*
			 * If no node was specified, use __vmalloc, which is
			 * always exported.
			 */
			ret = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
		} else {
			/*
			 * Otherwise we need to select a node, but __vmalloc_node
			 * is not exported; use this fallback wrapper, which uses
			 * kallsyms if available or falls back to kmalloc_node.
			 */
			ret = __lttng_vmalloc_node_fallback(size, 1,
					flags | __GFP_HIGHMEM, PAGE_KERNEL, node,
					__builtin_return_address(0));
		}

		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_all();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif
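
/*
 * Example usage of the allocation helpers (illustrative sketch only; the
 * struct name, element count and error handling below are hypothetical):
 *
 *	struct event_entry *entries;
 *
 *	entries = lttng_kvzalloc(nr_entries * sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(entries);
 */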

#endif /* _LTTNG_WRAPPER_VMALLOC_H */