fix: KVM: MMU: change tracepoints arguments to kvm_page_fault (v5.16)
[lttng-modules.git] / include / instrumentation / events / arch / x86 / kvm / mmutrace.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define LTTNG_TRACE_KVM_MMU_H
4
5 #include <lttng/tracepoint-event.h>
6 #include <lttng/kernel-version.h>
7
8 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0))
9 #include <linux/trace_events.h>
10 #else /* if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */
11 #include <linux/ftrace_event.h>
12 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */
13
14 #undef TRACE_SYSTEM
15 #define TRACE_SYSTEM kvm_mmu
16
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))

/*
 * Common CTF field layout recorded for every event that takes a
 * struct kvm_mmu_page (named `sp` at the expansion site).
 *
 * Kernels >= 5.1 (and RHEL 4.18.0-147+): no mmu_valid_gen field.
 */
#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0))

/*
 * Kernels 3.11 .. 5.0: struct kvm_mmu_page additionally exposes
 * mmu_valid_gen, so record it alongside the common fields.
 */
#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */

/* Kernels < 3.11: same layout as >= 5.1 (no mmu_valid_gen). */
#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */
44
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0))
/*
 * A pagetable walk has started.
 *
 * Kernels >= 3.6 pass the page-fault error code (pferr) directly.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer_hex(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */
/*
 * A pagetable walk has started.
 *
 * Kernels < 3.6 pass the individual fault flags; reconstruct a pferr
 * value so the recorded field layout matches the newer variant.
 * Bit positions mirror the x86 page-fault error-code encoding
 * (bit 1 = write, bit 2 = user, bit 4 = fetch).
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_FIELDS(
		ctf_integer_hex(__u64, addr, addr)
		ctf_integer(__u32, pferr,
			(!!write_fault << 1) | (!!user_fault << 2)
			| (!!fetch_fault << 4))
	)
)
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */
76
/* We just walked a paging element: record the raw PTE and its level. */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)
88
/*
 * Shared event class for accessed/dirty bit updates: the guest-physical
 * address of the touched PTE is reconstructed from the page-table gfn,
 * the entry index, and the entry size.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		/* gpa = base of the table page + byte offset of the entry */
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)
100
/* We set a pte accessed bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
116
/* A guest pagetable walk failed; pferr is the page-fault error code. */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)
126
/*
 * A shadow page was looked up or created; `created` distinguishes a
 * fresh allocation from a cache hit.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)
137
/* Event class for events that only record the shadow-page fields. */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)
147
/* A shadow page is being synchronized with the guest page table. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is being marked unsynchronized. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is being prepared for zapping. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
165
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0))

/*
 * An MMIO SPTE was installed. Kernels >= 5.10 pass the raw spte value;
 * access bits and MMIO generation are extracted from it here so the
 * recorded fields stay compatible with the older variants below.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, spte & ACC_ALL)
		ctf_integer(unsigned int, gen, get_mmio_spte_generation(spte))
	)
)

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0))

/* Kernels 3.11 .. 5.9: access and generation are passed directly. */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */

/* Kernels < 3.11: no MMIO generation field on this tracepoint. */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */
220
/* An MMIO page fault was handled; records fault address, gfn and access. */
LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)
235
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
/*
 * Fast (lockless) page-fault handling attempt.
 *
 * Kernels >= 5.16 bundle the fault parameters into struct
 * kvm_page_fault; address and error code are read from it so the
 * recorded fields match the pre-5.16 layout.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
		u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, fault, sptep, old_spte, ret),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, fault->addr)
		ctf_integer(u32, error_code, fault->error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		/* new_spte: value present in the slot after the attempt */
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(int, ret, ret)
	)
)
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0))
/*
 * Kernels 5.10 .. 5.15: gpa and error code passed separately;
 * result is an int return code.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(int, ret, ret)
	)
)
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
	LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
	LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
	LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,92, 4,16,0,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,44, 5,1,0,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,43, 5,3,18,45) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,46, 5,4,0,0))
/*
 * Kernels (and stable/distro backports) that take gpa_t but still
 * report the outcome as a bool `retry` instead of an int return code.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#else
/* Oldest kernels: the fault address is a guest virtual address (gva_t). */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#endif
324
325 #endif /* LTTNG_TRACE_KVM_MMU_H */
326
327 #undef TRACE_INCLUDE_PATH
328 #define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
329 #undef TRACE_INCLUDE_FILE
330 #define TRACE_INCLUDE_FILE mmutrace
331
332 /* This part must be outside protection */
333 #include <lttng/define_trace.h>
This page took 0.044471 seconds and 5 git commands to generate.