Fix: update sched instrumentation for kernel 4.12
[lttng-modules.git] / instrumentation/events/lttng-module/block.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(LTTNG_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_BLOCK_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/trace_seq.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
#include <scsi/scsi_request.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */

#ifndef _TRACE_BLOCK_DEF_
#define _TRACE_BLOCK_DEF_

enum {
	RWBS_FLAG_WRITE = (1 << 0),
	RWBS_FLAG_DISCARD = (1 << 1),
	RWBS_FLAG_READ = (1 << 2),
	RWBS_FLAG_RAHEAD = (1 << 3),
	RWBS_FLAG_BARRIER = (1 << 4),
	RWBS_FLAG_SYNC = (1 << 5),
	RWBS_FLAG_META = (1 << 6),
	RWBS_FLAG_SECURE = (1 << 7),
	RWBS_FLAG_FLUSH = (1 << 8),
	RWBS_FLAG_FUA = (1 << 9),
	RWBS_FLAG_PREFLUSH = (1 << 10),
};

#endif /* _TRACE_BLOCK_DEF_ */
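
/*
 * Each RWBS_FLAG_* value occupies its own bit, so a single rwbs integer can
 * carry several attributes at once by OR-ing flags together.  For example,
 * a synchronous write with forced unit access is recorded as
 *
 *	RWBS_FLAG_WRITE | RWBS_FLAG_SYNC | RWBS_FLAG_FUA
 *	      = 0x001   |     0x020      |     0x200      = 0x221
 *
 * and a trace consumer can decode the integer back into the familiar
 * blktrace-style flag letters.
 */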

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))

#define lttng_req_op(rq) req_op(rq)
#define lttng_req_rw(rq) ((rq)->cmd_flags)
#define lttng_bio_op(bio) bio_op(bio)
#define lttng_bio_rw(bio) ((bio)->bi_opf)

#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
	ctf_integer(type, rwbs, \
		(((op) == REQ_OP_WRITE || (op) == REQ_OP_WRITE_SAME) ? RWBS_FLAG_WRITE : \
		( (op) == REQ_OP_DISCARD ? RWBS_FLAG_DISCARD : \
		( (op) == REQ_OP_SECURE_ERASE ? (RWBS_FLAG_DISCARD | RWBS_FLAG_SECURE) : \
		( (op) == REQ_OP_FLUSH ? RWBS_FLAG_FLUSH : \
		( (op) == REQ_OP_READ ? RWBS_FLAG_READ : \
		( 0 )))))) \
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
		| ((rw) & REQ_PREFLUSH ? RWBS_FLAG_PREFLUSH : 0) \
		| ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))

#define lttng_req_op(rq)
#define lttng_req_rw(rq) ((rq)->cmd_flags)
#define lttng_bio_op(bio)
#define lttng_bio_rw(bio) ((bio)->bi_rw)

#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
		( (bytes) ? RWBS_FLAG_READ : \
		( 0 )))) \
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0) \
		| ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0) \
		| ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))

#define lttng_req_op(rq)
#define lttng_req_rw(rq) ((rq)->cmd_flags)
#define lttng_bio_op(bio)
#define lttng_bio_rw(bio) ((bio)->bi_rw)

#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
		( (bytes) ? RWBS_FLAG_READ : \
		( 0 )))) \
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))

#define lttng_req_op(rq)
#define lttng_req_rw(rq) ((rq)->cmd_flags)
#define lttng_bio_op(bio)
#define lttng_bio_rw(bio) ((bio)->bi_rw)

#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
		( (bytes) ? RWBS_FLAG_READ : \
		( 0 )))) \
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
		| ((rw) & REQ_HARDBARRIER ? RWBS_FLAG_BARRIER : 0) \
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#else

#define lttng_req_op(rq)
#define lttng_req_rw(rq) ((rq)->cmd_flags)
#define lttng_bio_op(bio)
#define lttng_bio_rw(bio) ((bio)->bi_rw)

#define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
		( (rw) & (1 << BIO_RW_DISCARD) ? RWBS_FLAG_DISCARD : \
		( (bytes) ? RWBS_FLAG_READ : \
		( 0 )))) \
		| ((rw) & (1 << BIO_RW_AHEAD) ? RWBS_FLAG_RAHEAD : 0) \
		| ((rw) & (1 << BIO_RW_SYNCIO) ? RWBS_FLAG_SYNC : 0) \
		| ((rw) & (1 << BIO_RW_META) ? RWBS_FLAG_META : 0) \
		| ((rw) & (1 << BIO_RW_BARRIER) ? RWBS_FLAG_BARRIER : 0))

#endif

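/*
 * blk_rwbs_ctf_integer() folds the I/O direction and flags into the single
 * rwbs integer recorded by the events below, selecting whichever encoding
 * matches the kernel the probes are built against: on >= 4.8 kernels the
 * operation (REQ_OP_*) and the flag bits are passed separately, while older
 * kernels carry everything in the rw/cmd_flags word.  A minimal sketch of
 * how a trace analysis might turn the recorded value back into blktrace-style
 * letters (the helper name is illustrative only, it is not part of LTTng):
 *
 *	static void rwbs_to_string(unsigned int rwbs, char *buf)
 *	{
 *		int i = 0;
 *
 *		if (rwbs & RWBS_FLAG_WRITE)
 *			buf[i++] = 'W';
 *		else if (rwbs & RWBS_FLAG_DISCARD)
 *			buf[i++] = 'D';
 *		else if (rwbs & RWBS_FLAG_READ)
 *			buf[i++] = 'R';
 *		if (rwbs & RWBS_FLAG_FUA)
 *			buf[i++] = 'F';
 *		if (rwbs & RWBS_FLAG_RAHEAD)
 *			buf[i++] = 'A';
 *		if (rwbs & RWBS_FLAG_SYNC)
 *			buf[i++] = 'S';
 *		if (rwbs & RWBS_FLAG_META)
 *			buf[i++] = 'M';
 *		buf[i] = '\0';
 *	}
 */
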
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
LTTNG_TRACEPOINT_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bh->b_bdev->bd_dev)
		ctf_integer(sector_t, sector, bh->b_blocknr)
		ctf_integer(size_t, size, bh->b_size)
	)
)

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
)

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
)
#endif

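/*
 * Two variants of the request-based probes are kept below.  Starting with
 * kernel 4.11 the SCSI passthrough command lives in struct scsi_request
 * rather than directly in struct request, so the newer code path uses
 * blk_rq_is_scsi()/scsi_req() where the older one tested rq->cmd_type for
 * REQ_TYPE_BLOCK_PC and read rq->cmd/rq->cmd_len.
 */
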
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_locvar(
		sector_t sector;
		unsigned int nr_sector;
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (blk_rq_is_scsi(rq)) {
			struct scsi_request *scsi_rq = scsi_req(rq);
			tp_locvar->sector = 0;
			tp_locvar->nr_sector = 0;
			tp_locvar->cmd = scsi_rq->cmd;
			tp_locvar->cmd_len = scsi_rq->cmd_len;
		} else {
			tp_locvar->sector = blk_rq_pos(rq);
			tp_locvar->nr_sector = blk_rq_sectors(rq);
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, tp_locvar->sector)
		ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
		ctf_integer(int, errors, rq->errors)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
	),

	TP_code_post()
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_locvar(
		sector_t sector;
		unsigned int nr_sector;
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(

		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			tp_locvar->sector = 0;
			tp_locvar->nr_sector = 0;
			tp_locvar->cmd = rq->cmd;
			tp_locvar->cmd_len = rq->cmd_len;
		} else {
			tp_locvar->sector = blk_rq_pos(rq);
			tp_locvar->nr_sector = blk_rq_sectors(rq);
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, tp_locvar->sector)
		ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
		ctf_integer(int, errors, rq->errors)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
	),

	TP_code_post()
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If the @rq->bio is %NULL, then there is absolutely no additional
 * work to do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
LTTNG_TRACEPOINT_EVENT_CODE(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_locvar(
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (blk_rq_is_scsi(rq)) {
			struct scsi_request *scsi_rq = scsi_req(rq);
			tp_locvar->cmd = scsi_rq->cmd;
			tp_locvar->cmd_len = scsi_rq->cmd_len;
		} else {
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, blk_rq_pos(rq))
		ctf_integer(unsigned int, nr_sector, nr_bytes >> 9)
		ctf_integer(int, errors, rq->errors)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), nr_bytes)
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
	),

	TP_code_post()
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5) \
	|| LTTNG_KERNEL_RANGE(3,12,21, 3,13,0) \
	|| LTTNG_KERNEL_RANGE(3,10,41, 3,11,0) \
	|| LTTNG_KERNEL_RANGE(3,4,91, 3,5,0) \
	|| LTTNG_KERNEL_RANGE(3,2,58, 3,3,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,28, 3,14,0,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,0,0, 3,11,0,0,0,0))

LTTNG_TRACEPOINT_EVENT_CODE(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_locvar(
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			tp_locvar->cmd = rq->cmd;
			tp_locvar->cmd_len = rq->cmd_len;
		} else {
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, blk_rq_pos(rq))
		ctf_integer(unsigned int, nr_sector, nr_bytes >> 9)
		ctf_integer(int, errors, rq->errors)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), nr_bytes)
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
	),

	TP_code_post()
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5)) */

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If the @rq->bio is %NULL, then there is absolutely no additional
 * work to do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5)) */

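/*
 * In the nr_bytes variants above the completed size is exported in 512-byte
 * sectors: nr_sector = nr_bytes >> 9, so a 4 KiB completion is recorded as
 * 8 sectors and a 1 MiB completion as 2048 sectors.
 */
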
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_locvar(
		sector_t sector;
		unsigned int nr_sector;
		unsigned int bytes;
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (blk_rq_is_scsi(rq)) {
			struct scsi_request *scsi_rq = scsi_req(rq);
			tp_locvar->sector = 0;
			tp_locvar->nr_sector = 0;
			tp_locvar->bytes = scsi_rq->resid_len;
			tp_locvar->cmd = scsi_rq->cmd;
			tp_locvar->cmd_len = scsi_rq->cmd_len;
		} else {
			tp_locvar->sector = blk_rq_pos(rq);
			tp_locvar->nr_sector = blk_rq_sectors(rq);
			tp_locvar->bytes = 0;
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, tp_locvar->sector)
		ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
		ctf_integer(unsigned int, bytes, tp_locvar->bytes)
		ctf_integer(pid_t, tid, current->pid)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	),

	TP_code_post()
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_locvar(
		sector_t sector;
		unsigned int nr_sector;
		unsigned int bytes;
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			tp_locvar->sector = 0;
			tp_locvar->nr_sector = 0;
			tp_locvar->bytes = blk_rq_bytes(rq);
			tp_locvar->cmd = rq->cmd;
			tp_locvar->cmd_len = rq->cmd_len;
		} else {
			tp_locvar->sector = blk_rq_pos(rq);
			tp_locvar->nr_sector = blk_rq_sectors(rq);
			tp_locvar->bytes = 0;
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, tp_locvar->sector)
		ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
		ctf_integer(unsigned int, bytes, tp_locvar->bytes)
		ctf_integer(pid_t, tid, current->pid)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	),

	TP_code_post()
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

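/*
 * A quick way to see these request events is to enable them from an LTTng
 * kernel tracing session, e.g. (illustrative commands, session name chosen
 * arbitrarily):
 *
 *	lttng create block-demo
 *	lttng enable-event --kernel block_rq_insert,block_rq_issue,block_rq_complete
 *	lttng start
 *	... generate some disk I/O ...
 *	lttng stop
 *	lttng view
 */
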
/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
LTTNG_TRACEPOINT_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

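/*
 * The 3.14 conditionals in the bio events above and below track the
 * immutable biovec rework in that kernel: the current sector and remaining
 * size moved from bio->bi_sector and bio->bi_size into the iterator at
 * bio->bi_iter, so newer kernels read bi_iter.bi_sector and bi_iter.bi_size
 * for the same fields.
 */
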
/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
LTTNG_TRACEPOINT_EVENT(block_bio_complete,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),
#else
	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),
#endif

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		ctf_integer(int, error, error)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
		ctf_integer(int, error, error)
#else
		ctf_integer(int, error, 0)
#endif
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
LTTNG_TRACEPOINT_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
LTTNG_TRACEPOINT_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */

LTTNG_TRACEPOINT_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio ? bio->bi_bdev->bd_dev : 0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio ? bio->bi_iter.bi_sector : 0)
		ctf_integer(unsigned int, nr_sector,
			bio ? bio_sectors(bio) : 0)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio ? lttng_bio_op(bio) : 0,
			bio ? lttng_bio_rw(bio) : 0,
			bio ? bio->bi_iter.bi_size : 0)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio ? bio->bi_sector : 0)
		ctf_integer(unsigned int, nr_sector,
			bio ? bio->bi_size >> 9 : 0)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio ? lttng_bio_op(bio) : 0,
			bio ? lttng_bio_rw(bio) : 0,
			bio ? bio->bi_size : 0)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
LTTNG_TRACEPOINT_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_FIELDS(
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

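/*
 * The unplug events follow the 2.6.39 plugging rework: plugging became
 * per-task, the timer-driven unplug path went away, and the remaining
 * tracepoint carries the queue depth and whether the unplug was explicit.
 * Older kernels keep the separate block_unplug_timer event and derive nr_rq
 * from the queue's pending read/write request counts.
 */
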
LTTNG_TRACEPOINT_EVENT_CLASS(block_unplug,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),
#endif

	TP_FIELDS(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
		ctf_integer(int, nr_rq, depth)
#else
		ctf_integer(int, nr_rq, q->rq.count[READ] + q->rq.count[WRITE])
#endif
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
/**
 * block_unplug_timer - timed release of operations requests in queue to device driver
 * @q: request queue to unplug
 *
 * Unplug the request queue @q because a timer expired and allow block
 * operation requests to be sent to the device driver.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug_timer,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
)
#endif

/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to work
 * on elements in the request queue.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug,
#else
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug_io,
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
#endif
)

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as operations crossing device boundaries in a RAID system.
 */
LTTNG_TRACEPOINT_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, new_sector, new_sector)
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
LTTNG_TRACEPOINT_EVENT(block_bio_remap,
#else
LTTNG_TRACEPOINT_EVENT(block_remap,
#endif

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio),
			bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(dev_t, old_dev, dev)
		ctf_integer(sector_t, old_sector, from)
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
LTTNG_TRACEPOINT_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_FIELDS(
		ctf_integer(dev_t, dev, disk_devt(rq->rq_disk))
		ctf_integer(sector_t, sector, blk_rq_pos(rq))
		ctf_integer(unsigned int, nr_sector, blk_rq_sectors(rq))
		ctf_integer(dev_t, old_dev, dev)
		ctf_integer(sector_t, old_sector, from)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
	)
)
#endif

#undef __print_rwbs_flags
#undef blk_fill_rwbs

#endif /* LTTNG_TRACE_BLOCK_H */

/* This part must be outside protection */
#include <probes/define_trace.h>