/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all()/vmalloc_sync_mappings(). Uses KALLSYMS
 * to look up the symbol's address when available; otherwise the kernel must
 * export the function to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <lttng-kernel-version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
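
/*
 * This wrapper provides, on all supported kernels:
 *
 *	wrapper_vmalloc_sync_mappings()
 *	lttng_kvmalloc() / lttng_kvmalloc_node()
 *	lttng_kvzalloc() / lttng_kvzalloc_node()
 *	lttng_kvfree()
 */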

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8: the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}
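
/*
 * Illustrative only (hypothetical caller, not part of this header): code
 * that makes a vmalloc'd buffer reachable from the tracing fast path would
 * pair the allocation with a sync, e.g.:
 *
 *	buf = vmalloc(size);
 *	if (buf)
 *		wrapper_vmalloc_sync_mappings();
 */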

/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}
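
/*
 * The canary is never called at run time; compiling the call above against
 * the kernel's own prototype makes the build fail if that prototype ever
 * drifts from the one assumed by the kallsyms lookup.
 */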

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8: the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}
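
/*
 * Illustrative usage (hypothetical struct and caller, not part of this
 * header):
 *
 *	struct foo *f;
 *
 *	f = lttng_kvzalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(f);
 */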

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}
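
/*
 * Note on the fallback above: __vmalloc() takes no node parameter, so when
 * the kallsyms lookup fails, the requested NUMA node is silently ignored;
 * that is the performance degradation the warning refers to.
 */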

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, unsigned long vm_flags, int node,
 *		const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, int node, const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, int node, void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0)) */

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables), so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0)) */

#endif /* _LTTNG_WRAPPER_VMALLOC_H */