/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVM_MMU_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
#include <linux/trace_events.h>
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
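
/*
 * Note: the guard above follows the kernel's rename of
 * <linux/ftrace_event.h> to <linux/trace_events.h> in Linux 4.2.
 */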

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_mmu

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
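
/*
 * LTTNG_KVM_MMU_PAGE_FIELDS expands to the common struct kvm_mmu_page
 * payload shared by the shadow-page events below. The 3.11 <= v < 5.1
 * variant additionally records the page's mmu_valid_gen; the other
 * variants record only the fields common to all supported kernels.
 */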

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr,
			(!!write_fault << 1) | (!!user_fault << 2)
			| (!!fetch_fault << 4))
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
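
/*
 * On kernels older than 3.6 the walk tracepoint passes the individual
 * fault flags; they are folded back into a single pferr value whose bit
 * layout matches the x86 page-fault error code (bit 1: write, bit 2:
 * user, bit 4: fetch), so both variants emit the same field.
 */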

/* We just walked a paging element */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)
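
/*
 * The gpa field above reconstructs the guest physical address of the
 * paging entry that was touched: the table's gfn converted to a byte
 * address plus the byte offset of entry 'index' of size 'size'.
 */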

/* We set a pte accessed bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)
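
/*
 * kvm_mmu_page_class above provides the common shadow-page payload
 * shared by the sync_page, unsync_page and prepare_zap_page instances
 * that follow.
 */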

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
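
/*
 * From 3.11 onward the kernel's mark_mmio_spte tracepoint also carries
 * the MMIO spte generation number, so the newer variant records it as
 * the gen field; older kernels have no such argument.
 */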

LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || \
	LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
	LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
	LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,92, 4,16,0,0) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,43, 5,3,18,45) || \
	LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,46, 5,4,0,0))
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#else
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#endif
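
/*
 * Kernels carrying the CR2/GPA rework (5.6+ and the stable and Ubuntu
 * backports listed in the guard above) pass the faulting address to
 * fast_page_fault as a gpa_t cr2_or_gpa rather than a gva_t gva; apart
 * from that field the two variants record the same payload.
 */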

#endif /* LTTNG_TRACE_KVM_MMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <probes/define_trace.h>