lttng-modules.git: include/instrumentation/events/rcu.h
// SPDX-FileCopyrightText: 2012 Andrew Gabbasov <andrew_gabbasov@mentor.com>
//
// SPDX-License-Identifier: GPL-2.0-only

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_RCU_H

#include <lttng/tracepoint-event.h>
#include <lttng/kernel-version.h>

/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string is of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 *                       such as "context switch". Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 */
LTTNG_TRACEPOINT_EVENT(rcu_utilization,

	TP_PROTO(const char *s),

	TP_ARGS(s),

	TP_FIELDS(
		ctf_string(s, s)
	)
)
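/*
 * Illustrative only (not part of this header): kernel-side RCU code is
 * expected to emit these markers in matched pairs, for example:
 *
 *	trace_rcu_utilization(TPS("Start context switch"));
 *	...
 *	trace_rcu_utilization(TPS("End context switch"));
 *
 * The exact activity strings depend on the kernel version; data-reduction
 * scripts pair the "Start"/"End" markers to compute utilization.
 */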

#ifdef CONFIG_RCU_TRACE

#if defined(CONFIG_TREE_RCU) \
	|| defined(CONFIG_PREEMPT_RCU) \
	|| defined(CONFIG_TREE_PREEMPT_RCU)

/*
 * Tracepoint for grace-period events: starting and ending a grace
 * period ("start" and "end", respectively), a CPU noting the start
 * of a new grace period or the end of an old grace period ("cpustart"
 * and "cpuend", respectively), a CPU passing through a quiescent
 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
 * and "cpuofl", respectively), and a CPU being kicked for being too
 * long in dyntick-idle mode ("kick").
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),

	TP_ARGS(rcuname, gp_seq, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_string(gpevent, gpevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#endif
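/*
 * Illustrative only: a typical kernel-side emission of this event might look
 * like the following (names such as rcu_state and gp_seq are assumptions
 * about the kernel's internals, not guaranteed by this header):
 *
 *	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
 *
 * with the last argument taking one of the string values listed in the
 * comment above ("start", "end", "cpustart", "cpuend", "cpuqs", ...).
 */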

/*
 * Tracepoint for grace-period-initialization events. These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#endif

/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections. Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),

	TP_ARGS(rcuname, pid, gp_seq),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section. Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),

	TP_ARGS(rcuname, gp_seq, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for quiescent-state-reporting events. These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gp_seq,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#endif

/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
 * too long.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gp_seq, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#endif

#endif /*
 * #if defined(CONFIG_TREE_RCU)
 * || defined(CONFIG_PREEMPT_RCU)
 * || defined(CONFIG_TREE_PREEMPT_RCU)
 */

/*
 * Tracepoint for dyntick-idle entry/exit events. These take a string
 * as argument: "Start" for entering dyntick-idle mode, "End" for
 * leaving it, "--=" for events moving towards idle, and "++=" for events
 * moving away from idle. "Error on entry: not idle task" and "Error on
 * exit: not idle task" indicate that a non-idle task is erroneously
 * toying with the idle loop.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest. Note that task-related
 * events use the upper bits of each number, while interrupt-related
 * events use the lower bits.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) \
	|| LTTNG_KERNEL_RANGE(5,5,6, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,22, 5,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,46, 5,1,0,0)
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, dynticks)
	)
)

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,16,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, atomic_read(&dynticks))
	)
)

#else
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#endif
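/*
 * Illustrative only: on recent kernels, idle entry might record something
 * like the following (the rdp field names here are assumptions and vary
 * by kernel version):
 *
 *	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0,
 *			  atomic_read(&rdp->dynticks));
 *
 * i.e. the nesting count drops to 0 on entry to dyntick-idle and the
 * current dynticks counter value is captured alongside it.
 */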


/*
 * Tracepoint for RCU preparation for idle, the goal being to get RCU
 * processing done so that the current CPU can shut off its scheduling
 * clock and enter dyntick-idle mode. One way to accomplish this is
 * to drain all RCU callbacks from this CPU, and the other is to have
 * done everything RCU requires for the current grace period. In this
 * latter case, the CPU will be awakened at the end of the current grace
 * period in order to process the remainder of its callbacks.
 *
 * These tracepoints take a string as argument:
 *
 * "No callbacks": Nothing to do, no callbacks on this CPU.
 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
 * "More callbacks": Still more callbacks, try again to clear them out.
 * "Callbacks drained": All callbacks processed, off to dyntick idle!
 * "Timer": Timer fired to cause CPU to continue processing callbacks.
 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
 * "Cleanup after idle": Idle exited, timer canceled.
 */
LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,

	TP_PROTO(const char *reason),

	TP_ARGS(reason),

	TP_FIELDS(
		ctf_string(reason, reason)
	)
)

/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, the next argument is the
 * number of lazy callbacks queued (kernels before 5.6 only), and the
 * last argument is the total number of callbacks queued.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),

	TP_ARGS(rcuname, rhp, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen, qlen)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#endif
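/*
 * Illustrative only: callback-registration paths such as call_rcu() are
 * expected to emit this event roughly as follows (the queue-length helper
 * and rdp/rcu_state names shown here are assumptions):
 *
 *	trace_rcu_callback(rcu_state.name, rhp,
 *			   rcu_segcblist_n_cbs(&rdp->cblist));
 *
 * Older kernels pass the lazy and total queue lengths separately, matching
 * the second variant above.
 */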


/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kfree() form. The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * the next argument is the number of lazy callbacks queued (kernels
 * before 5.6 only), and the last argument is the total number of
 * callbacks queued.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen, qlen)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#endif

/*
 * Tracepoint for marking the beginning of rcu_do_batch, performed to start
 * RCU callback invocation. The first argument is the RCU flavor,
 * the second is the number of lazy callbacks queued (kernels before 5.6
 * only), the next is the total number of callbacks queued, and the last
 * argument is the current RCU-callback batch limit.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(const char *rcuname, long qlen, long blimit),

	TP_ARGS(rcuname, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#endif

/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp),

	TP_ARGS(rcuname, rhp),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
	)
)

/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree() form. The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),

	TP_ARGS(rcuname, rhp, offset),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer(unsigned long, offset, offset)
	)
)

/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked. The first argument is the name of the RCU flavor,
 * the second argument is the number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
LTTNG_TRACEPOINT_EVENT(rcu_batch_end,

	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 char cb, char nr, char iit, char risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(int, callbacks_invoked, callbacks_invoked)
		ctf_integer(char, cb, cb)
		ctf_integer(char, nr, nr)
		ctf_integer(char, iit, iit)
		ctf_integer(char, risk, risk)
	)
)
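/*
 * Illustrative only: the kernel's callback-invocation loop (rcu_do_batch())
 * is expected to bracket its work with these events, roughly as sketched
 * below (the bookkeeping variables are assumptions and differ across
 * kernel versions; only the nesting of the events is the point here):
 *
 *	trace_rcu_batch_start(rcu_state.name, qlen, blimit);
 *	while (there is a next callback rhp && count < blimit) {
 *		trace_rcu_invoke_callback(rcu_state.name, rhp);
 *		rhp->func(rhp);
 *		count++;
 *	}
 *	trace_rcu_batch_end(rcu_state.name, count, !!more, need_resched(),
 *			    is_idle_task(current), rcu_is_callbacks_kthread());
 */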

/*
 * Tracepoint for rcutorture readers. The first argument is the name
 * of the RCU flavor from rcutorture's viewpoint and the second argument
 * is the callback address. The third argument is the start time in
 * seconds, and the last two arguments are the grace period numbers
 * at the beginning and end of the read, respectively.
 */
LTTNG_TRACEPOINT_EVENT(rcu_torture_read,

	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),

	TP_FIELDS(
		ctf_string(rcutorturename, rcutorturename)
		ctf_integer_hex(struct rcu_head *, rhp, rhp)
		ctf_integer(unsigned long, secs, secs)
		ctf_integer(unsigned long, c_old, c_old)
		ctf_integer(unsigned long, c, c)
	)
)

/*
 * Tracepoint for _rcu_barrier() execution. The string "s" describes
 * the _rcu_barrier phase:
 * "Begin": rcu_barrier_callback() started.
 * "Check": rcu_barrier_callback() checking for piggybacking.
 * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
 * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
 * "Offline": rcu_barrier_callback() found offline CPU.
 * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
 * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
 * "LastCB": An rcu_barrier_callback() invoked the last callback.
 * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
LTTNG_TRACEPOINT_EVENT(rcu_barrier,

	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),

	TP_ARGS(rcuname, s, cpu, cnt, done),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_string(s, s)
		ctf_integer(int, cpu, cpu)
		ctf_integer(int, cnt, cnt)
		ctf_integer(unsigned long, done, done)
	)
)

#else /* #ifdef CONFIG_RCU_TRACE */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
				    qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
					 grplo, grphi, gp_tasks) do { } \
	while (0)
#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#else
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
				    qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
					 grplo, grphi, gp_tasks) do { } \
	while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#endif

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,16,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#else
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#endif

#define trace_rcu_prep_idle(reason) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
	do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
	do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
	do { } while (0)
#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

#endif /* LTTNG_TRACE_RCU_H */

/* This part must be outside protection */
#include <lttng/define_trace.h>