/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVM_MMU_H
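
/*
 * LTTng instrumentation for the x86 KVM MMU tracepoints (the upstream
 * definitions live in arch/x86/kvm/mmutrace.h).  As a usage sketch, once
 * the matching probe module is loaded, an event defined below can
 * typically be recorded with:
 *
 *	lttng enable-event --kernel kvm_mmu_get_page
 *
 * ("lttng list --kernel" shows the exact event names available.)
 */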

#include <probes/lttng-tracepoint-event.h>
#include <linux/version.h>
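
/* The ftrace event header was renamed to trace_events.h in Linux 4.2. */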
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
#include <linux/trace_events.h>
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_mmu
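
/*
 * Common fields recorded for the kvm_mmu_page-based events below.  Kernels
 * in the [3.11, 5.1) range additionally expose the page's mmu_valid_gen,
 * hence the extra branch.
 */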

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
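
/*
 * Since Linux 3.6 the kvm_mmu_pagetable_walk tracepoint carries a single
 * page fault error code; older kernels pass the individual fault flags,
 * which the fallback below folds into an equivalent pferr value.
 */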

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr,
			(!!write_fault << 1) | (!!user_fault << 2)
			| (!!fetch_fault << 4))
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */

/* We just walked a paging element */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)
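
/*
 * The accessed-bit and dirty-bit events below share this class; the recorded
 * gpa is the guest physical address of the updated pte, derived from the
 * table gfn, the entry index and the entry size.
 */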

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)

/* We set a pte accessed bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)
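
/*
 * The sync, unsync and prepare_zap instances below only differ by name; each
 * records the common LTTNG_KVM_MMU_PAGE_FIELDS of the affected shadow page.
 */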

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
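
/*
 * Linux 3.11 added a generation number to MMIO sptes, so the mark_mmio_spte
 * tracepoint gained a "gen" argument; older kernels only report sptep, gfn
 * and access.
 */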

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)
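
/*
 * The fast_page_fault tracepoint switched from a gva argument to cr2_or_gpa
 * in Linux 5.6, and several stable and Ubuntu kernels backported that change;
 * the version ranges below track those backports.
 */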

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || \
	LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
	LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
	LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,92, 4,16,0,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,43, 5,3,18,45) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,46, 5,4,0,0))
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#else
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#endif

#endif /* LTTNG_TRACE_KVM_MMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <probes/define_trace.h>