/*
 * Source: lttng-modules.git — include/instrumentation/events/arch/x86/kvm/mmutrace.h
 * Extracted at commit 5116bf0e6c32b4f33bad843f837a93056a3bd2ce.
 */
1 // SPDX-FileCopyrightText: 2013 Mohamad Gebai <mohamad.gebai@polymtl.ca>
2 //
3 // SPDX-License-Identifier: GPL-2.0-only
4
/*
 * Multi-read header guard: TRACE_HEADER_MULTI_READ lets the LTTng
 * tracepoint machinery re-include this file with different macro
 * expansions (declaration pass vs. probe-generation pass).
 */
#if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVM_MMU_H

#include <lttng/tracepoint-event.h>
#include <lttng/kernel-version.h>

#include <linux/trace_events.h>

/* All events below are emitted under the "kvm_mmu" trace system. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_mmu
15
/*
 * Common CTF field list recorded for events that take a
 * struct kvm_mmu_page *sp (shadow page) argument.
 *
 * On kernels >= 5.1 (or RHEL 4.18.0-147+) the mmu_valid_gen field is
 * not recorded — presumably the struct member changed upstream at that
 * point; only the older branch reads (sp)->mmu_valid_gen.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif
35
/*
 * A pagetable walk has started.
 * Records the address being translated and the page-fault error code.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer_hex(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
49
/*
 * We just walked a paging element: records the raw PTE value and the
 * page-table level it was found at.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)
61
/*
 * Event class for setting a bit in a guest PTE. The single recorded
 * field is the guest physical address of the entry, reconstructed from
 * the table's gfn plus the entry's byte offset (index * entry size).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)

/* We set a pte accessed bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
89
/*
 * A pagetable walk terminated with an error; records only the
 * page-fault error code describing the failure.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)
99
/*
 * A shadow page was looked up or created. Records the common shadow
 * page fields plus whether the page was newly created ('created') or
 * an existing one was reused.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)
110
/*
 * Event class for shadow-page lifecycle events: records only the
 * common shadow page fields (LTTNG_KVM_MMU_PAGE_FIELDS).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)

/* A shadow page is being synchronized with the guest page table. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is being marked unsynchronized. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is being prepared for zapping (removal). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
138
/*
 * An MMIO SPTE was installed. Mapped to the LTTng event name
 * "kvm_mmu_mark_mmio_spte".
 *
 * On kernels >= 5.10 (or RHEL 4.18.0-305+) the upstream tracepoint
 * passes the raw SPTE value and the access bits / generation are
 * derived here (ACC_ALL mask, get_mmio_spte_generation()); older
 * kernels pass 'access' and 'gen' as separate arguments. Both variants
 * record the same four fields.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, spte & ACC_ALL)
		ctf_integer(unsigned int, gen, get_mmio_spte_generation(spte))
	)
)

#else

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)
#endif
176
/*
 * An MMIO page fault was handled. Mapped to the LTTng event name
 * "kvm_mmu_handle_mmio_page_fault"; records the faulting address, the
 * guest frame number and the access bits.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)
191
/*
 * A fault was handled on the fast page-fault path. Mapped to the LTTng
 * event name "kvm_mmu_fast_page_fault".
 *
 * Four prototype variants track upstream signature changes:
 *  - >= 5.16:  fault context is bundled in struct kvm_page_fault
 *              (addr / error_code read from it), result is 'int ret';
 *  - >= 5.10 (or RHEL 4.18.0-305+): explicit cr2_or_gpa / error_code
 *              arguments, result is 'int ret';
 *  - >= 5.6 and the listed stable/Ubuntu backport ranges: same explicit
 *              cr2_or_gpa arguments, but result is 'bool retry';
 *  - older:    address argument is a guest virtual address (gva_t gva).
 *
 * All variants also record *sptep as 'new_spte' — the SPTE value after
 * the fast path ran.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
		u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, fault, sptep, old_spte, ret),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, fault->addr)
		ctf_integer(u32, error_code, fault->error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(int, ret, ret)
	)
)
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(int, ret, ret)
	)
)
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
	LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
	LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
	LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,92, 4,16,0,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,44, 5,1,0,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,43, 5,3,18,45) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,46, 5,4,0,0))
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#else
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#endif
281
#endif /* LTTNG_TRACE_KVM_MMU_H */

/*
 * Tell the tracepoint machinery where to find this header on the
 * multi-read passes (path is relative to the instrumentation root).
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <lttng/define_trace.h>
/* (gitweb extraction footer removed from code path: page generation timing note) */