| 1 | #ifndef _LTT_WRAPPER_VMALLOC_H |
| 2 | #define _LTT_WRAPPER_VMALLOC_H |
| 3 | |
| 4 | /* |
| 5 | * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com) |
| 6 | * |
| 7 | * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when |
| 8 | * available, else we need to have a kernel that exports this function to GPL |
| 9 | * modules. |
| 10 | * |
| 11 | * Dual LGPL v2.1/GPL v2 license. |
| 12 | */ |
| 13 | |
| 14 | #ifdef CONFIG_KALLSYMS |
| 15 | |
| 16 | #include <linux/kallsyms.h> |
| 17 | |
| 18 | static inline |
| 19 | void wrapper_vmalloc_sync_all(void) |
| 20 | { |
| 21 | void (*vmalloc_sync_all_sym)(void); |
| 22 | |
| 23 | vmalloc_sync_all_sym = (void *) kallsyms_lookup_name("vmalloc_sync_all"); |
| 24 | if (vmalloc_sync_all_sym) { |
| 25 | vmalloc_sync_all_sym(); |
| 26 | } else { |
| 27 | #ifdef CONFIG_X86 |
| 28 | /* |
| 29 | * Only x86 needs vmalloc_sync_all to make sure LTTng does not |
| 30 | * trigger recursive page faults. |
| 31 | */ |
| 32 | printk(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n"); |
| 33 | printk(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n"); |
| 34 | #endif |
| 35 | } |
| 36 | } |
| 37 | #else |
| 38 | |
| 39 | #include <linux/vmalloc.h> |
| 40 | |
/*
 * Fallback when CONFIG_KALLSYMS is not set: the kernel must export
 * vmalloc_sync_all() to GPL modules for this call to link.
 */
static inline
void wrapper_vmalloc_sync_all(void)
{
	/*
	 * vmalloc_sync_all() returns void; "return expr;" in a void
	 * function is a C constraint violation (C99 6.8.6.4), so call
	 * it as a plain statement.
	 */
	vmalloc_sync_all();
}
| 46 | #endif |
| 47 | |
| 48 | #endif /* _LTT_WRAPPER_VMALLOC_H */ |