1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM rcu
5 #if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define LTTNG_TRACE_RCU_H
8 #include <probes/lttng-tracepoint-event.h>
9 #include <linux/version.h>
/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string is of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 *			 such as "context switch".  Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 */
22 LTTNG_TRACEPOINT_EVENT(rcu_utilization
,
24 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
25 TP_PROTO(const char *s
),
26 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
28 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
37 #ifdef CONFIG_RCU_TRACE
39 #if defined(CONFIG_TREE_RCU) \
40 || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
41 && defined(CONFIG_PREEMPT_RCU)) \
42 || defined(CONFIG_TREE_PREEMPT_RCU)
/*
 * Tracepoint for grace-period events: starting and ending a grace
 * period ("start" and "end", respectively), a CPU noting the start
 * of a new grace period or the end of an old grace period ("cpustart"
 * and "cpuend", respectively), a CPU passing through a quiescent
 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
 * and "cpuofl", respectively), and a CPU being kicked for being too
 * long in dyntick-idle mode ("kick").
 */
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
54 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
55 LTTNG_TRACEPOINT_EVENT(rcu_grace_period
,
57 TP_PROTO(const char *rcuname
, unsigned long gp_seq
, const char *gpevent
),
59 TP_ARGS(rcuname
, gp_seq
, gpevent
),
62 ctf_string(rcuname
, rcuname
)
63 ctf_integer(unsigned long, gp_seq
, gp_seq
)
64 ctf_string(gpevent
, gpevent
)
67 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
68 LTTNG_TRACEPOINT_EVENT(rcu_grace_period
,
70 TP_PROTO(const char *rcuname
, unsigned long gpnum
, const char *gpevent
),
72 TP_ARGS(rcuname
, gpnum
, gpevent
),
75 ctf_string(rcuname
, rcuname
)
76 ctf_integer(unsigned long, gpnum
, gpnum
)
77 ctf_string(gpevent
, gpevent
)
81 LTTNG_TRACEPOINT_EVENT(rcu_grace_period
,
83 TP_PROTO(char *rcuname
, unsigned long gpnum
, char *gpevent
),
85 TP_ARGS(rcuname
, gpnum
, gpevent
),
88 ctf_string(rcuname
, rcuname
)
89 ctf_integer(unsigned long, gpnum
, gpnum
)
90 ctf_string(gpevent
, gpevent
)
/*
 * Tracepoint for grace-period-initialization events.  These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
102 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
103 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
104 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init
,
106 TP_PROTO(const char *rcuname
, unsigned long gp_seq
, u8 level
,
107 int grplo
, int grphi
, unsigned long qsmask
),
109 TP_ARGS(rcuname
, gp_seq
, level
, grplo
, grphi
, qsmask
),
112 ctf_string(rcuname
, rcuname
)
113 ctf_integer(unsigned long, gp_seq
, gp_seq
)
114 ctf_integer(u8
, level
, level
)
115 ctf_integer(int, grplo
, grplo
)
116 ctf_integer(int, grphi
, grphi
)
117 ctf_integer(unsigned long, qsmask
, qsmask
)
120 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
121 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init
,
123 TP_PROTO(const char *rcuname
, unsigned long gpnum
, u8 level
,
124 int grplo
, int grphi
, unsigned long qsmask
),
126 TP_ARGS(rcuname
, gpnum
, level
, grplo
, grphi
, qsmask
),
129 ctf_string(rcuname
, rcuname
)
130 ctf_integer(unsigned long, gpnum
, gpnum
)
131 ctf_integer(u8
, level
, level
)
132 ctf_integer(int, grplo
, grplo
)
133 ctf_integer(int, grphi
, grphi
)
134 ctf_integer(unsigned long, qsmask
, qsmask
)
138 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init
,
140 TP_PROTO(char *rcuname
, unsigned long gpnum
, u8 level
,
141 int grplo
, int grphi
, unsigned long qsmask
),
143 TP_ARGS(rcuname
, gpnum
, level
, grplo
, grphi
, qsmask
),
146 ctf_string(rcuname
, rcuname
)
147 ctf_integer(unsigned long, gpnum
, gpnum
)
148 ctf_integer(u8
, level
, level
)
149 ctf_integer(int, grplo
, grplo
)
150 ctf_integer(int, grphi
, grphi
)
151 ctf_integer(unsigned long, qsmask
, qsmask
)
/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections.  Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 */
162 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
163 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
164 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task
,
166 TP_PROTO(const char *rcuname
, int pid
, unsigned long gp_seq
),
168 TP_ARGS(rcuname
, pid
, gp_seq
),
171 ctf_string(rcuname
, rcuname
)
172 ctf_integer(unsigned long, gp_seq
, gp_seq
)
173 ctf_integer(int, pid
, pid
)
176 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
177 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task
,
179 TP_PROTO(const char *rcuname
, int pid
, unsigned long gpnum
),
181 TP_ARGS(rcuname
, pid
, gpnum
),
184 ctf_string(rcuname
, rcuname
)
185 ctf_integer(unsigned long, gpnum
, gpnum
)
186 ctf_integer(int, pid
, pid
)
190 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task
,
192 TP_PROTO(char *rcuname
, int pid
, unsigned long gpnum
),
194 TP_ARGS(rcuname
, pid
, gpnum
),
197 ctf_string(rcuname
, rcuname
)
198 ctf_integer(unsigned long, gpnum
, gpnum
)
199 ctf_integer(int, pid
, pid
)
/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section.  Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
209 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
210 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
211 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task
,
213 TP_PROTO(const char *rcuname
, unsigned long gp_seq
, int pid
),
215 TP_ARGS(rcuname
, gp_seq
, pid
),
218 ctf_string(rcuname
, rcuname
)
219 ctf_integer(unsigned long, gp_seq
, gp_seq
)
220 ctf_integer(int, pid
, pid
)
223 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
224 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task
,
226 TP_PROTO(const char *rcuname
, unsigned long gpnum
, int pid
),
228 TP_ARGS(rcuname
, gpnum
, pid
),
231 ctf_string(rcuname
, rcuname
)
232 ctf_integer(unsigned long, gpnum
, gpnum
)
233 ctf_integer(int, pid
, pid
)
237 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task
,
239 TP_PROTO(char *rcuname
, unsigned long gpnum
, int pid
),
241 TP_ARGS(rcuname
, gpnum
, pid
),
244 ctf_string(rcuname
, rcuname
)
245 ctf_integer(unsigned long, gpnum
, gpnum
)
246 ctf_integer(int, pid
, pid
)
/*
 * Tracepoint for quiescent-state-reporting events.  These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
259 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
260 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
261 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report
,
263 TP_PROTO(const char *rcuname
, unsigned long gp_seq
,
264 unsigned long mask
, unsigned long qsmask
,
265 u8 level
, int grplo
, int grphi
, int gp_tasks
),
267 TP_ARGS(rcuname
, gp_seq
, mask
, qsmask
, level
, grplo
, grphi
, gp_tasks
),
270 ctf_string(rcuname
, rcuname
)
271 ctf_integer(unsigned long, gp_seq
, gp_seq
)
272 ctf_integer(unsigned long, mask
, mask
)
273 ctf_integer(unsigned long, qsmask
, qsmask
)
274 ctf_integer(u8
, level
, level
)
275 ctf_integer(int, grplo
, grplo
)
276 ctf_integer(int, grphi
, grphi
)
277 ctf_integer(u8
, gp_tasks
, gp_tasks
)
280 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
281 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report
,
283 TP_PROTO(const char *rcuname
, unsigned long gpnum
,
284 unsigned long mask
, unsigned long qsmask
,
285 u8 level
, int grplo
, int grphi
, int gp_tasks
),
287 TP_ARGS(rcuname
, gpnum
, mask
, qsmask
, level
, grplo
, grphi
, gp_tasks
),
290 ctf_string(rcuname
, rcuname
)
291 ctf_integer(unsigned long, gpnum
, gpnum
)
292 ctf_integer(unsigned long, mask
, mask
)
293 ctf_integer(unsigned long, qsmask
, qsmask
)
294 ctf_integer(u8
, level
, level
)
295 ctf_integer(int, grplo
, grplo
)
296 ctf_integer(int, grphi
, grphi
)
297 ctf_integer(u8
, gp_tasks
, gp_tasks
)
301 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report
,
303 TP_PROTO(char *rcuname
, unsigned long gpnum
,
304 unsigned long mask
, unsigned long qsmask
,
305 u8 level
, int grplo
, int grphi
, int gp_tasks
),
307 TP_ARGS(rcuname
, gpnum
, mask
, qsmask
, level
, grplo
, grphi
, gp_tasks
),
310 ctf_string(rcuname
, rcuname
)
311 ctf_integer(unsigned long, gpnum
, gpnum
)
312 ctf_integer(unsigned long, mask
, mask
)
313 ctf_integer(unsigned long, qsmask
, qsmask
)
314 ctf_integer(u8
, level
, level
)
315 ctf_integer(int, grplo
, grplo
)
316 ctf_integer(int, grphi
, grphi
)
317 ctf_integer(u8
, gp_tasks
, gp_tasks
)
/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
 * too long.
 */
330 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
331 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
332 LTTNG_TRACEPOINT_EVENT(rcu_fqs
,
334 TP_PROTO(const char *rcuname
, unsigned long gp_seq
, int cpu
, const char *qsevent
),
336 TP_ARGS(rcuname
, gp_seq
, cpu
, qsevent
),
339 ctf_integer(unsigned long, gp_seq
, gp_seq
)
340 ctf_integer(int, cpu
, cpu
)
341 ctf_string(rcuname
, rcuname
)
342 ctf_string(qsevent
, qsevent
)
345 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
346 LTTNG_TRACEPOINT_EVENT(rcu_fqs
,
348 TP_PROTO(const char *rcuname
, unsigned long gpnum
, int cpu
, const char *qsevent
),
350 TP_ARGS(rcuname
, gpnum
, cpu
, qsevent
),
353 ctf_integer(unsigned long, gpnum
, gpnum
)
354 ctf_integer(int, cpu
, cpu
)
355 ctf_string(rcuname
, rcuname
)
356 ctf_string(qsevent
, qsevent
)
360 LTTNG_TRACEPOINT_EVENT(rcu_fqs
,
362 TP_PROTO(char *rcuname
, unsigned long gpnum
, int cpu
, char *qsevent
),
364 TP_ARGS(rcuname
, gpnum
, cpu
, qsevent
),
367 ctf_integer(unsigned long, gpnum
, gpnum
)
368 ctf_integer(int, cpu
, cpu
)
369 ctf_string(rcuname
, rcuname
)
370 ctf_string(qsevent
, qsevent
)
376 * #if defined(CONFIG_TREE_RCU)
377 * || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
378 * && defined(CONFIG_PREEMPT_RCU))
379 * || defined(CONFIG_TREE_PREEMPT_RCU)
/*
 * Tracepoint for dyntick-idle entry/exit events.  These take a string
 * as argument: "Start" for entering dyntick-idle mode, "End" for
 * leaving it, "--=" for events moving towards idle, and "++=" for events
 * moving away from idle.  "Error on entry: not idle task" and "Error on
 * exit: not idle task" indicate that a non-idle task is erroneously
 * toying with the idle loop.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest.  Note that task-related
 * events use the upper bits of each number, while interrupt-related
 * events use the lower bits.
 */
395 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) \
396 || LTTNG_KERNEL_RANGE(5,5,6, 5,6,0) \
397 || LTTNG_KERNEL_RANGE(5,4,22, 5,5,0)
398 LTTNG_TRACEPOINT_EVENT(rcu_dyntick
,
400 TP_PROTO(const char *polarity
, long oldnesting
, long newnesting
, int dynticks
),
402 TP_ARGS(polarity
, oldnesting
, newnesting
, dynticks
),
405 ctf_string(polarity
, polarity
)
406 ctf_integer(long, oldnesting
, oldnesting
)
407 ctf_integer(long, newnesting
, newnesting
)
408 ctf_integer(int, dynticks
, dynticks
)
412 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
413 LTTNG_TRACEPOINT_EVENT(rcu_dyntick
,
415 TP_PROTO(const char *polarity
, long oldnesting
, long newnesting
, atomic_t dynticks
),
417 TP_ARGS(polarity
, oldnesting
, newnesting
, dynticks
),
420 ctf_string(polarity
, polarity
)
421 ctf_integer(long, oldnesting
, oldnesting
)
422 ctf_integer(long, newnesting
, newnesting
)
423 ctf_integer(int, dynticks
, atomic_read(&dynticks
))
427 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
428 LTTNG_TRACEPOINT_EVENT(rcu_dyntick
,
430 TP_PROTO(const char *polarity
, long long oldnesting
, long long newnesting
),
432 TP_ARGS(polarity
, oldnesting
, newnesting
),
435 ctf_string(polarity
, polarity
)
436 ctf_integer(long long, oldnesting
, oldnesting
)
437 ctf_integer(long long, newnesting
, newnesting
)
440 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
441 LTTNG_TRACEPOINT_EVENT(rcu_dyntick
,
443 TP_PROTO(char *polarity
, long long oldnesting
, long long newnesting
),
445 TP_ARGS(polarity
, oldnesting
, newnesting
),
448 ctf_string(polarity
, polarity
)
449 ctf_integer(long long, oldnesting
, oldnesting
)
450 ctf_integer(long long, newnesting
, newnesting
)
454 LTTNG_TRACEPOINT_EVENT(rcu_dyntick
,
456 TP_PROTO(char *polarity
),
461 ctf_string(polarity
, polarity
)
467 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
469 * Tracepoint for RCU preparation for idle, the goal being to get RCU
470 * processing done so that the current CPU can shut off its scheduling
471 * clock and enter dyntick-idle mode. One way to accomplish this is
472 * to drain all RCU callbacks from this CPU, and the other is to have
473 * done everything RCU requires for the current grace period. In this
474 * latter case, the CPU will be awakened at the end of the current grace
475 * period in order to process the remainder of its callbacks.
477 * These tracepoints take a string as argument:
479 * "No callbacks": Nothing to do, no callbacks on this CPU.
480 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
481 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
482 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
483 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
484 * "More callbacks": Still more callbacks, try again to clear them out.
485 * "Callbacks drained": All callbacks processed, off to dyntick idle!
486 * "Timer": Timer fired to cause CPU to continue processing callbacks.
487 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
488 * "Cleanup after idle": Idle exited, timer canceled.
490 LTTNG_TRACEPOINT_EVENT(rcu_prep_idle
,
492 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
493 TP_PROTO(const char *reason
),
494 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
495 TP_PROTO(char *reason
),
496 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
501 ctf_string(reason
, reason
)
/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, the third element is the
 * number of lazy callbacks queued, and the fourth element is the
 * total number of callbacks queued.
 */
513 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
514 LTTNG_TRACEPOINT_EVENT(rcu_callback
,
516 TP_PROTO(const char *rcuname
, struct rcu_head
*rhp
, long qlen
),
518 TP_ARGS(rcuname
, rhp
, qlen
),
521 ctf_string(rcuname
, rcuname
)
522 ctf_integer_hex(void *, rhp
, rhp
)
523 ctf_integer_hex(void *, func
, rhp
->func
)
524 ctf_integer(long, qlen
, qlen
)
527 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
528 LTTNG_TRACEPOINT_EVENT(rcu_callback
,
530 TP_PROTO(const char *rcuname
, struct rcu_head
*rhp
, long qlen_lazy
,
533 TP_ARGS(rcuname
, rhp
, qlen_lazy
, qlen
),
536 ctf_string(rcuname
, rcuname
)
537 ctf_integer_hex(void *, rhp
, rhp
)
538 ctf_integer_hex(void *, func
, rhp
->func
)
539 ctf_integer(long, qlen_lazy
, qlen_lazy
)
540 ctf_integer(long, qlen
, qlen
)
543 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
544 LTTNG_TRACEPOINT_EVENT(rcu_callback
,
546 TP_PROTO(char *rcuname
, struct rcu_head
*rhp
, long qlen_lazy
,
549 TP_ARGS(rcuname
, rhp
, qlen_lazy
, qlen
),
552 ctf_string(rcuname
, rcuname
)
553 ctf_integer_hex(void *, rhp
, rhp
)
554 ctf_integer_hex(void *, func
, rhp
->func
)
555 ctf_integer(long, qlen_lazy
, qlen_lazy
)
556 ctf_integer(long, qlen
, qlen
)
560 LTTNG_TRACEPOINT_EVENT(rcu_callback
,
562 TP_PROTO(char *rcuname
, struct rcu_head
*rhp
, long qlen
),
564 TP_ARGS(rcuname
, rhp
, qlen
),
567 ctf_string(rcuname
, rcuname
)
568 ctf_integer_hex(void *, rhp
, rhp
)
569 ctf_integer_hex(void *, func
, rhp
->func
)
570 ctf_integer(long, qlen
, qlen
)
/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kfree() form.  The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * the fourth argument is the number of lazy callbacks queued, and the
 * fifth argument is the total number of callbacks queued.
 */
584 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
585 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback
,
587 TP_PROTO(const char *rcuname
, struct rcu_head
*rhp
, unsigned long offset
,
590 TP_ARGS(rcuname
, rhp
, offset
, qlen
),
593 ctf_string(rcuname
, rcuname
)
594 ctf_integer_hex(void *, rhp
, rhp
)
595 ctf_integer_hex(unsigned long, offset
, offset
)
596 ctf_integer(long, qlen
, qlen
)
599 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
600 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback
,
602 TP_PROTO(const char *rcuname
, struct rcu_head
*rhp
, unsigned long offset
,
603 long qlen_lazy
, long qlen
),
605 TP_ARGS(rcuname
, rhp
, offset
, qlen_lazy
, qlen
),
608 ctf_string(rcuname
, rcuname
)
609 ctf_integer_hex(void *, rhp
, rhp
)
610 ctf_integer_hex(unsigned long, offset
, offset
)
611 ctf_integer(long, qlen_lazy
, qlen_lazy
)
612 ctf_integer(long, qlen
, qlen
)
615 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
616 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback
,
618 TP_PROTO(char *rcuname
, struct rcu_head
*rhp
, unsigned long offset
,
619 long qlen_lazy
, long qlen
),
621 TP_ARGS(rcuname
, rhp
, offset
, qlen_lazy
, qlen
),
624 ctf_string(rcuname
, rcuname
)
625 ctf_integer_hex(void *, rhp
, rhp
)
626 ctf_integer_hex(unsigned long, offset
, offset
)
627 ctf_integer(long, qlen_lazy
, qlen_lazy
)
628 ctf_integer(long, qlen
, qlen
)
632 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback
,
634 TP_PROTO(char *rcuname
, struct rcu_head
*rhp
, unsigned long offset
,
637 TP_ARGS(rcuname
, rhp
, offset
, qlen
),
640 ctf_string(rcuname
, rcuname
)
641 ctf_integer_hex(void *, rhp
, rhp
)
642 ctf_integer_hex(unsigned long, offset
, offset
)
643 ctf_integer(long, qlen
, qlen
)
/*
 * Tracepoint for marking the beginning rcu_do_batch, performed to start
 * RCU callback invocation.  The first argument is the RCU flavor,
 * the second is the number of lazy callbacks queued, the third is
 * the total number of callbacks queued, and the fourth argument is
 * the current RCU-callback batch limit.
 */
655 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
656 LTTNG_TRACEPOINT_EVENT(rcu_batch_start
,
658 TP_PROTO(const char *rcuname
, long qlen
, long blimit
),
660 TP_ARGS(rcuname
, qlen
, blimit
),
663 ctf_string(rcuname
, rcuname
)
664 ctf_integer(long, qlen
, qlen
)
665 ctf_integer(long, blimit
, blimit
)
668 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
669 LTTNG_TRACEPOINT_EVENT(rcu_batch_start
,
671 TP_PROTO(const char *rcuname
, long qlen_lazy
, long qlen
, long blimit
),
673 TP_ARGS(rcuname
, qlen_lazy
, qlen
, blimit
),
676 ctf_string(rcuname
, rcuname
)
677 ctf_integer(long, qlen_lazy
, qlen_lazy
)
678 ctf_integer(long, qlen
, qlen
)
679 ctf_integer(long, blimit
, blimit
)
682 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
683 LTTNG_TRACEPOINT_EVENT(rcu_batch_start
,
685 TP_PROTO(char *rcuname
, long qlen_lazy
, long qlen
, long blimit
),
687 TP_ARGS(rcuname
, qlen_lazy
, qlen
, blimit
),
690 ctf_string(rcuname
, rcuname
)
691 ctf_integer(long, qlen_lazy
, qlen_lazy
)
692 ctf_integer(long, qlen
, qlen
)
693 ctf_integer(long, blimit
, blimit
)
696 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
697 LTTNG_TRACEPOINT_EVENT(rcu_batch_start
,
699 TP_PROTO(char *rcuname
, long qlen_lazy
, long qlen
, int blimit
),
701 TP_ARGS(rcuname
, qlen_lazy
, qlen
, blimit
),
704 ctf_string(rcuname
, rcuname
)
705 ctf_integer(long, qlen_lazy
, qlen_lazy
)
706 ctf_integer(long, qlen
, qlen
)
707 ctf_integer(int, blimit
, blimit
)
711 LTTNG_TRACEPOINT_EVENT(rcu_batch_start
,
713 TP_PROTO(char *rcuname
, long qlen
, int blimit
),
715 TP_ARGS(rcuname
, qlen
, blimit
),
718 ctf_string(rcuname
, rcuname
)
719 ctf_integer(long, qlen
, qlen
)
720 ctf_integer(int, blimit
, blimit
)
/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
730 LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback
,
732 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
733 TP_PROTO(const char *rcuname
, struct rcu_head
*rhp
),
734 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
735 TP_PROTO(char *rcuname
, struct rcu_head
*rhp
),
736 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
738 TP_ARGS(rcuname
, rhp
),
741 ctf_string(rcuname
, rcuname
)
742 ctf_integer_hex(void *, rhp
, rhp
)
743 ctf_integer_hex(void *, func
, rhp
->func
)
/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree() form.  The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
754 LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback
,
756 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
757 TP_PROTO(const char *rcuname
, struct rcu_head
*rhp
, unsigned long offset
),
758 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
759 TP_PROTO(char *rcuname
, struct rcu_head
*rhp
, unsigned long offset
),
760 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
762 TP_ARGS(rcuname
, rhp
, offset
),
765 ctf_string(rcuname
, rcuname
)
766 ctf_integer_hex(void *, rhp
, rhp
)
767 ctf_integer(unsigned long, offset
, offset
)
/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked.  The first argument is the name of the RCU flavor,
 * the second argument is number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
782 LTTNG_TRACEPOINT_EVENT(rcu_batch_end
,
784 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
785 TP_PROTO(const char *rcuname
, int callbacks_invoked
,
786 char cb
, char nr
, char iit
, char risk
),
788 TP_ARGS(rcuname
, callbacks_invoked
, cb
, nr
, iit
, risk
),
789 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
790 TP_PROTO(const char *rcuname
, int callbacks_invoked
,
791 bool cb
, bool nr
, bool iit
, bool risk
),
793 TP_ARGS(rcuname
, callbacks_invoked
, cb
, nr
, iit
, risk
),
794 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
795 TP_PROTO(char *rcuname
, int callbacks_invoked
,
796 bool cb
, bool nr
, bool iit
, bool risk
),
798 TP_ARGS(rcuname
, callbacks_invoked
, cb
, nr
, iit
, risk
),
800 TP_PROTO(char *rcuname
, int callbacks_invoked
),
802 TP_ARGS(rcuname
, callbacks_invoked
),
806 ctf_string(rcuname
, rcuname
)
807 ctf_integer(int, callbacks_invoked
, callbacks_invoked
)
808 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
809 ctf_integer(char, cb
, cb
)
810 ctf_integer(char, nr
, nr
)
811 ctf_integer(char, iit
, iit
)
812 ctf_integer(char, risk
, risk
)
813 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
814 ctf_integer(bool, cb
, cb
)
815 ctf_integer(bool, nr
, nr
)
816 ctf_integer(bool, iit
, iit
)
817 ctf_integer(bool, risk
, risk
)
822 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
824 * Tracepoint for rcutorture readers. The first argument is the name
825 * of the RCU flavor from rcutorture's viewpoint and the second argument
826 * is the callback address.
828 LTTNG_TRACEPOINT_EVENT(rcu_torture_read
,
830 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
831 TP_PROTO(const char *rcutorturename
, struct rcu_head
*rhp
,
832 unsigned long secs
, unsigned long c_old
, unsigned long c
),
834 TP_ARGS(rcutorturename
, rhp
, secs
, c_old
, c
),
835 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
836 TP_PROTO(char *rcutorturename
, struct rcu_head
*rhp
,
837 unsigned long secs
, unsigned long c_old
, unsigned long c
),
839 TP_ARGS(rcutorturename
, rhp
, secs
, c_old
, c
),
841 TP_PROTO(char *rcutorturename
, struct rcu_head
*rhp
),
843 TP_ARGS(rcutorturename
, rhp
),
847 ctf_string(rcutorturename
, rcutorturename
)
848 ctf_integer_hex(struct rcu_head
*, rhp
, rhp
)
849 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
850 ctf_integer(unsigned long, secs
, secs
)
851 ctf_integer(unsigned long, c_old
, c_old
)
852 ctf_integer(unsigned long, c
, c
)
858 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
860 * Tracepoint for _rcu_barrier() execution. The string "s" describes
861 * the _rcu_barrier phase:
862 * "Begin": rcu_barrier_callback() started.
863 * "Check": rcu_barrier_callback() checking for piggybacking.
864 * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
865 * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
866 * "Offline": rcu_barrier_callback() found offline CPU
867 * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
868 * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
869 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
870 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
871 * "LastCB": An rcu_barrier_callback() invoked the last callback.
872 * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
873 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
874 * is the count of remaining callbacks, and "done" is the piggybacking count.
876 LTTNG_TRACEPOINT_EVENT(rcu_barrier
,
878 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
879 TP_PROTO(const char *rcuname
, const char *s
, int cpu
, int cnt
, unsigned long done
),
880 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
881 TP_PROTO(char *rcuname
, char *s
, int cpu
, int cnt
, unsigned long done
),
882 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
884 TP_ARGS(rcuname
, s
, cpu
, cnt
, done
),
887 ctf_string(rcuname
, rcuname
)
889 ctf_integer(int, cpu
, cpu
)
890 ctf_integer(int, cnt
, cnt
)
891 ctf_integer(unsigned long, done
, done
)
896 #else /* #ifdef CONFIG_RCU_TRACE */
898 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
899 LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
900 #define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
901 #define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
902 qsmask) do { } while (0)
903 #define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
904 #define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
905 #define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
906 grplo, grphi, gp_tasks) do { } \
908 #define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
910 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
911 #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
912 qsmask) do { } while (0)
913 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
914 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
915 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
916 grplo, grphi, gp_tasks) do { } \
918 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
921 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
922 #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
923 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
924 #define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
926 #define trace_rcu_dyntick(polarity) do { } while (0)
928 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
929 #define trace_rcu_prep_idle(reason) do { } while (0)
931 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
932 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
933 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
935 #define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
938 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
939 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
940 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
942 #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
943 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
944 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
945 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
948 #define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
950 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
951 #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
953 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
954 #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
956 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
957 #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
959 #endif /* #else #ifdef CONFIG_RCU_TRACE */
961 #endif /* LTTNG_TRACE_RCU_H */
963 /* This part must be outside protection */
964 #include <probes/define_trace.h>