#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( int, errors )
		__array( char, rwbs, 6 )
		__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->errors = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);
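
/*
 * Events defined on this class (block_rq_abort, block_rq_requeue and
 * block_rq_complete below) share the record layout above and print as
 *
 *	major,minor rwbs (cmd) sector + nr_sectors [errors]
 *
 * An illustrative (made-up) line as it could appear in the trace buffer:
 *
 *	8,0 W () 1234567 + 8 [0]
 */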
44 | ||
45 | /** | |
46 | * block_rq_abort - abort block operation request | |
47 | * @q: queue containing the block operation request | |
48 | * @rq: block IO operation request | |
49 | * | |
50 | * Called immediately after pending block IO operation request @rq in | |
51 | * queue @q is aborted. The fields in the operation request @rq | |
52 | * can be examined to determine which device and sectors the pending | |
53 | * operation would access. | |
54 | */ | |
55 | DEFINE_EVENT(block_rq_with_error, block_rq_abort, | |
56 | ||
57 | TP_PROTO(struct request_queue *q, struct request *rq), | |
58 | ||
59 | TP_ARGS(q, rq) | |
60 | ); | |
61 | ||
62 | /** | |
63 | * block_rq_requeue - place block IO request back on a queue | |
64 | * @q: queue holding operation | |
65 | * @rq: block IO operation request | |
66 | * | |
67 | * The block operation request @rq is being placed back into queue | |
68 | * @q. For some reason the request was not completed and needs to be | |
69 | * put back in the queue. | |
70 | */ | |
71 | DEFINE_EVENT(block_rq_with_error, block_rq_requeue, | |
72 | ||
73 | TP_PROTO(struct request_queue *q, struct request *rq), | |
74 | ||
75 | TP_ARGS(q, rq) | |
76 | ); | |
77 | ||
/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operation request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver. If
 * @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
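
/*
 * A minimal sketch (hypothetical names, not part of this header) of how
 * a module could attach a probe to one of these events. The probe takes
 * the private data pointer passed at registration followed by the
 * TP_PROTO arguments of the class:
 *
 *	static void my_complete_probe(void *data,
 *				      struct request_queue *q,
 *				      struct request *rq)
 *	{
 *		pr_info("completed %u sectors\n", blk_rq_sectors(rq));
 *	}
 *
 *	register_trace_block_rq_complete(my_complete_probe, NULL);
 *	...
 *	unregister_trace_block_rq_complete(my_complete_probe, NULL);
 */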
95 | ||
96 | DECLARE_EVENT_CLASS(block_rq, | |
97 | ||
98 | TP_PROTO(struct request_queue *q, struct request *rq), | |
99 | ||
100 | TP_ARGS(q, rq), | |
101 | ||
102 | TP_STRUCT__entry( | |
103 | __field( dev_t, dev ) | |
104 | __field( sector_t, sector ) | |
105 | __field( unsigned int, nr_sector ) | |
106 | __field( unsigned int, bytes ) | |
107 | __array( char, rwbs, 6 ) | |
108 | __array( char, comm, TASK_COMM_LEN ) | |
109 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | |
110 | ), | |
111 | ||
112 | TP_fast_assign( | |
113 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | |
114 | __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | |
115 | 0 : blk_rq_pos(rq); | |
116 | __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | |
117 | 0 : blk_rq_sectors(rq); | |
118 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | |
119 | blk_rq_bytes(rq) : 0; | |
120 | ||
121 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); | |
122 | blk_dump_cmd(__get_str(cmd), rq); | |
123 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
124 | ), | |
125 | ||
126 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | |
127 | MAJOR(__entry->dev), MINOR(__entry->dev), | |
128 | __entry->rwbs, __entry->bytes, __get_str(cmd), | |
129 | (unsigned long long)__entry->sector, | |
130 | __entry->nr_sector, __entry->comm) | |
131 | ); | |
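
/*
 * Same shape as block_rq_with_error above, but events on this class
 * record the submitting task's comm and the byte count of packet
 * commands instead of the request's error field.
 */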
132 | ||
133 | /** | |
134 | * block_rq_insert - insert block operation request into queue | |
135 | * @q: target queue | |
136 | * @rq: block IO operation request | |
137 | * | |
138 | * Called immediately before block operation request @rq is inserted | |
139 | * into queue @q. The fields in the operation request @rq struct can | |
140 | * be examined to determine which device and sectors the pending | |
141 | * operation would access. | |
142 | */ | |
143 | DEFINE_EVENT(block_rq, block_rq_insert, | |
144 | ||
145 | TP_PROTO(struct request_queue *q, struct request *rq), | |
146 | ||
147 | TP_ARGS(q, rq) | |
148 | ); | |
149 | ||
/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
164 | ||
165 | /** | |
166 | * block_bio_bounce - used bounce buffer when processing block operation | |
167 | * @q: queue holding the block operation | |
168 | * @bio: block operation | |
169 | * | |
170 | * A bounce buffer was used to handle the block operation @bio in @q. | |
171 | * This occurs when hardware limitations prevent a direct transfer of | |
172 | * data between the @bio data memory area and the IO device. Use of a | |
173 | * bounce buffer requires extra copying of data and decreases | |
174 | * performance. | |
175 | */ | |
176 | TRACE_EVENT(block_bio_bounce, | |
177 | ||
178 | TP_PROTO(struct request_queue *q, struct bio *bio), | |
179 | ||
180 | TP_ARGS(q, bio), | |
181 | ||
182 | TP_STRUCT__entry( | |
183 | __field( dev_t, dev ) | |
184 | __field( sector_t, sector ) | |
185 | __field( unsigned int, nr_sector ) | |
186 | __array( char, rwbs, 6 ) | |
187 | __array( char, comm, TASK_COMM_LEN ) | |
188 | ), | |
189 | ||
190 | TP_fast_assign( | |
191 | __entry->dev = bio->bi_bdev ? | |
192 | bio->bi_bdev->bd_dev : 0; | |
193 | __entry->sector = bio->bi_sector; | |
194 | __entry->nr_sector = bio->bi_size >> 9; | |
195 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | |
196 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
197 | ), | |
198 | ||
199 | TP_printk("%d,%d %s %llu + %u [%s]", | |
200 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
201 | (unsigned long long)__entry->sector, | |
202 | __entry->nr_sector, __entry->comm) | |
203 | ); | |
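
/*
 * The ">> 9" above converts the bio's byte count into 512-byte sectors,
 * the unit the block layer uses for bi_sector: a 4096-byte bio, for
 * example, spans 8 sectors.
 */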
204 | ||
205 | /** | |
206 | * block_bio_complete - completed all work on the block operation | |
207 | * @q: queue holding the block operation | |
208 | * @bio: block operation completed | |
209 | * @error: io error value | |
210 | * | |
211 | * This tracepoint indicates there is no further work to do on this | |
212 | * block IO operation @bio. | |
213 | */ | |
214 | TRACE_EVENT(block_bio_complete, | |
215 | ||
216 | TP_PROTO(struct request_queue *q, struct bio *bio, int error), | |
217 | ||
218 | TP_ARGS(q, bio, error), | |
219 | ||
220 | TP_STRUCT__entry( | |
221 | __field( dev_t, dev ) | |
222 | __field( sector_t, sector ) | |
223 | __field( unsigned, nr_sector ) | |
224 | __field( int, error ) | |
225 | __array( char, rwbs, 6 ) | |
226 | ), | |
227 | ||
228 | TP_fast_assign( | |
229 | __entry->dev = bio->bi_bdev->bd_dev; | |
230 | __entry->sector = bio->bi_sector; | |
231 | __entry->nr_sector = bio->bi_size >> 9; | |
232 | __entry->error = error; | |
233 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | |
234 | ), | |
235 | ||
236 | TP_printk("%d,%d %s %llu + %u [%d]", | |
237 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
238 | (unsigned long long)__entry->sector, | |
239 | __entry->nr_sector, __entry->error) | |
240 | ); | |
241 | ||
242 | DECLARE_EVENT_CLASS(block_bio, | |
243 | ||
244 | TP_PROTO(struct request_queue *q, struct bio *bio), | |
245 | ||
246 | TP_ARGS(q, bio), | |
247 | ||
248 | TP_STRUCT__entry( | |
249 | __field( dev_t, dev ) | |
250 | __field( sector_t, sector ) | |
251 | __field( unsigned int, nr_sector ) | |
252 | __array( char, rwbs, 6 ) | |
253 | __array( char, comm, TASK_COMM_LEN ) | |
254 | ), | |
255 | ||
256 | TP_fast_assign( | |
257 | __entry->dev = bio->bi_bdev->bd_dev; | |
258 | __entry->sector = bio->bi_sector; | |
259 | __entry->nr_sector = bio->bi_size >> 9; | |
260 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | |
261 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
262 | ), | |
263 | ||
264 | TP_printk("%d,%d %s %llu + %u [%s]", | |
265 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
266 | (unsigned long long)__entry->sector, | |
267 | __entry->nr_sector, __entry->comm) | |
268 | ); | |
269 | ||
270 | /** | |
271 | * block_bio_backmerge - merging block operation to the end of an existing operation | |
272 | * @q: queue holding operation | |
273 | * @bio: new block operation to merge | |
274 | * | |
275 | * Merging block request @bio to the end of an existing block request | |
276 | * in queue @q. | |
277 | */ | |
278 | DEFINE_EVENT(block_bio, block_bio_backmerge, | |
279 | ||
280 | TP_PROTO(struct request_queue *q, struct bio *bio), | |
281 | ||
282 | TP_ARGS(q, bio) | |
283 | ); | |
284 | ||
285 | /** | |
286 | * block_bio_frontmerge - merging block operation to the beginning of an existing operation | |
287 | * @q: queue holding operation | |
288 | * @bio: new block operation to merge | |
289 | * | |
290 | * Merging block IO operation @bio to the beginning of an existing block | |
291 | * operation in queue @q. | |
292 | */ | |
293 | DEFINE_EVENT(block_bio, block_bio_frontmerge, | |
294 | ||
295 | TP_PROTO(struct request_queue *q, struct bio *bio), | |
296 | ||
297 | TP_ARGS(q, bio) | |
298 | ); | |
299 | ||
300 | /** | |
301 | * block_bio_queue - putting new block IO operation in queue | |
302 | * @q: queue holding operation | |
303 | * @bio: new block operation | |
304 | * | |
305 | * About to place the block IO operation @bio into queue @q. | |
306 | */ | |
307 | DEFINE_EVENT(block_bio, block_bio_queue, | |
308 | ||
309 | TP_PROTO(struct request_queue *q, struct bio *bio), | |
310 | ||
311 | TP_ARGS(q, bio) | |
312 | ); | |
313 | ||
314 | DECLARE_EVENT_CLASS(block_get_rq, | |
315 | ||
316 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | |
317 | ||
318 | TP_ARGS(q, bio, rw), | |
319 | ||
320 | TP_STRUCT__entry( | |
321 | __field( dev_t, dev ) | |
322 | __field( sector_t, sector ) | |
323 | __field( unsigned int, nr_sector ) | |
324 | __array( char, rwbs, 6 ) | |
325 | __array( char, comm, TASK_COMM_LEN ) | |
326 | ), | |
327 | ||
328 | TP_fast_assign( | |
329 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | |
330 | __entry->sector = bio ? bio->bi_sector : 0; | |
331 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | |
332 | blk_fill_rwbs(__entry->rwbs, | |
333 | bio ? bio->bi_rw : 0, __entry->nr_sector); | |
334 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
335 | ), | |
336 | ||
337 | TP_printk("%d,%d %s %llu + %u [%s]", | |
338 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
339 | (unsigned long long)__entry->sector, | |
340 | __entry->nr_sector, __entry->comm) | |
341 | ); | |
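
/*
 * Unlike the bio-based classes above, @bio may be NULL here: a request
 * can be allocated before any bio is attached to it, hence the NULL
 * checks in TP_fast_assign().
 */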
342 | ||
343 | /** | |
344 | * block_getrq - get a free request entry in queue for block IO operations | |
345 | * @q: queue for operations | |
346 | * @bio: pending block IO operation | |
347 | * @rw: low bit indicates a read (%0) or a write (%1) | |
348 | * | |
349 | * A request struct for queue @q has been allocated to handle the | |
350 | * block IO operation @bio. | |
351 | */ | |
352 | DEFINE_EVENT(block_get_rq, block_getrq, | |
353 | ||
354 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | |
355 | ||
356 | TP_ARGS(q, bio, rw) | |
357 | ); | |
358 | ||
359 | /** | |
360 | * block_sleeprq - waiting to get a free request entry in queue for block IO operation | |
361 | * @q: queue for operation | |
362 | * @bio: pending block IO operation | |
363 | * @rw: low bit indicates a read (%0) or a write (%1) | |
364 | * | |
365 | * In the case where a request struct cannot be provided for queue @q | |
366 | * the process needs to wait for an request struct to become | |
367 | * available. This tracepoint event is generated each time the | |
368 | * process goes to sleep waiting for request struct become available. | |
369 | */ | |
370 | DEFINE_EVENT(block_get_rq, block_sleeprq, | |
371 | ||
372 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | |
373 | ||
374 | TP_ARGS(q, bio, rw) | |
375 | ); | |
376 | ||
377 | /** | |
378 | * block_plug - keep operations requests in request queue | |
379 | * @q: request queue to plug | |
380 | * | |
381 | * Plug the request queue @q. Do not allow block operation requests | |
382 | * to be sent to the device driver. Instead, accumulate requests in | |
383 | * the queue to improve throughput performance of the block device. | |
384 | */ | |
385 | TRACE_EVENT(block_plug, | |
386 | ||
387 | TP_PROTO(struct request_queue *q), | |
388 | ||
389 | TP_ARGS(q), | |
390 | ||
391 | TP_STRUCT__entry( | |
392 | __array( char, comm, TASK_COMM_LEN ) | |
393 | ), | |
394 | ||
395 | TP_fast_assign( | |
396 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
397 | ), | |
398 | ||
399 | TP_printk("[%s]", __entry->comm) | |
400 | ); | |
401 | ||
402 | DECLARE_EVENT_CLASS(block_unplug, | |
403 | ||
404 | TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), | |
405 | ||
406 | TP_ARGS(q, depth, explicit), | |
407 | ||
408 | TP_STRUCT__entry( | |
409 | __field( int, nr_rq ) | |
410 | __array( char, comm, TASK_COMM_LEN ) | |
411 | ), | |
412 | ||
413 | TP_fast_assign( | |
414 | __entry->nr_rq = depth; | |
415 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
416 | ), | |
417 | ||
418 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | |
419 | ); | |
420 | ||
421 | /** | |
422 | * block_unplug - release of operations requests in request queue | |
423 | * @q: request queue to unplug | |
424 | * @depth: number of requests just added to the queue | |
425 | * @explicit: whether this was an explicit unplug, or one from schedule() | |
426 | * | |
427 | * Unplug request queue @q because device driver is scheduled to work | |
428 | * on elements in the request queue. | |
429 | */ | |
430 | DEFINE_EVENT(block_unplug, block_unplug, | |
431 | ||
432 | TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), | |
433 | ||
434 | TP_ARGS(q, depth, explicit) | |
435 | ); | |
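
/*
 * A minimal sketch (not part of this header) of the per-task plugging
 * that drives block_plug/block_unplug in this kernel generation:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit a batch of bios ...
 *	blk_finish_plug(&plug);	// flushes the batch and traces an
 *				// explicit block_unplug
 *
 * If the task sleeps with the plug held, schedule() flushes it instead
 * and the unplug is traced with @explicit false.
 */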
436 | ||
437 | /** | |
438 | * block_split - split a single bio struct into two bio structs | |
439 | * @q: queue containing the bio | |
440 | * @bio: block operation being split | |
441 | * @new_sector: The starting sector for the new bio | |
442 | * | |
443 | * The bio request @bio in request queue @q needs to be split into two | |
444 | * bio requests. The newly created @bio request starts at | |
445 | * @new_sector. This split may be required due to hardware limitation | |
446 | * such as operation crossing device boundaries in a RAID system. | |
447 | */ | |
448 | TRACE_EVENT(block_split, | |
449 | ||
450 | TP_PROTO(struct request_queue *q, struct bio *bio, | |
451 | unsigned int new_sector), | |
452 | ||
453 | TP_ARGS(q, bio, new_sector), | |
454 | ||
455 | TP_STRUCT__entry( | |
456 | __field( dev_t, dev ) | |
457 | __field( sector_t, sector ) | |
458 | __field( sector_t, new_sector ) | |
459 | __array( char, rwbs, 6 ) | |
460 | __array( char, comm, TASK_COMM_LEN ) | |
461 | ), | |
462 | ||
463 | TP_fast_assign( | |
464 | __entry->dev = bio->bi_bdev->bd_dev; | |
465 | __entry->sector = bio->bi_sector; | |
466 | __entry->new_sector = new_sector; | |
467 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | |
468 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | |
469 | ), | |
470 | ||
471 | TP_printk("%d,%d %s %llu / %llu [%s]", | |
472 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
473 | (unsigned long long)__entry->sector, | |
474 | (unsigned long long)__entry->new_sector, | |
475 | __entry->comm) | |
476 | ); | |
477 | ||
478 | /** | |
479 | * block_bio_remap - map request for a logical device to the raw device | |
480 | * @q: queue holding the operation | |
481 | * @bio: revised operation | |
482 | * @dev: device for the operation | |
483 | * @from: original sector for the operation | |
484 | * | |
485 | * An operation for a logical device has been mapped to the | |
486 | * raw block device. | |
487 | */ | |
488 | TRACE_EVENT(block_bio_remap, | |
489 | ||
490 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | |
491 | sector_t from), | |
492 | ||
493 | TP_ARGS(q, bio, dev, from), | |
494 | ||
495 | TP_STRUCT__entry( | |
496 | __field( dev_t, dev ) | |
497 | __field( sector_t, sector ) | |
498 | __field( unsigned int, nr_sector ) | |
499 | __field( dev_t, old_dev ) | |
500 | __field( sector_t, old_sector ) | |
501 | __array( char, rwbs, 6 ) | |
502 | ), | |
503 | ||
504 | TP_fast_assign( | |
505 | __entry->dev = bio->bi_bdev->bd_dev; | |
506 | __entry->sector = bio->bi_sector; | |
507 | __entry->nr_sector = bio->bi_size >> 9; | |
508 | __entry->old_dev = dev; | |
509 | __entry->old_sector = from; | |
510 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | |
511 | ), | |
512 | ||
513 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | |
514 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
515 | (unsigned long long)__entry->sector, | |
516 | __entry->nr_sector, | |
517 | MAJOR(__entry->old_dev), MINOR(__entry->old_dev), | |
518 | (unsigned long long)__entry->old_sector) | |
519 | ); | |
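
/*
 * This is typically emitted for partition remapping and by stacking
 * drivers such as device-mapper and md when they forward a bio from a
 * logical device down to a component device.
 */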
520 | ||
521 | /** | |
522 | * block_rq_remap - map request for a block operation request | |
523 | * @q: queue holding the operation | |
524 | * @rq: block IO operation request | |
525 | * @dev: device for the operation | |
526 | * @from: original sector for the operation | |
527 | * | |
528 | * The block operation request @rq in @q has been remapped. The block | |
529 | * operation request @rq holds the current information and @from hold | |
530 | * the original sector. | |
531 | */ | |
532 | TRACE_EVENT(block_rq_remap, | |
533 | ||
534 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, | |
535 | sector_t from), | |
536 | ||
537 | TP_ARGS(q, rq, dev, from), | |
538 | ||
539 | TP_STRUCT__entry( | |
540 | __field( dev_t, dev ) | |
541 | __field( sector_t, sector ) | |
542 | __field( unsigned int, nr_sector ) | |
543 | __field( dev_t, old_dev ) | |
544 | __field( sector_t, old_sector ) | |
545 | __array( char, rwbs, 6 ) | |
546 | ), | |
547 | ||
548 | TP_fast_assign( | |
549 | __entry->dev = disk_devt(rq->rq_disk); | |
550 | __entry->sector = blk_rq_pos(rq); | |
551 | __entry->nr_sector = blk_rq_sectors(rq); | |
552 | __entry->old_dev = dev; | |
553 | __entry->old_sector = from; | |
554 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); | |
555 | ), | |
556 | ||
557 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | |
558 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | |
559 | (unsigned long long)__entry->sector, | |
560 | __entry->nr_sector, | |
561 | MAJOR(__entry->old_dev), MINOR(__entry->old_dev), | |
562 | (unsigned long long)__entry->old_sector) | |
563 | ); | |
564 | ||
565 | #endif /* _TRACE_BLOCK_H */ | |
566 | ||
567 | /* This part must be outside protection */ | |
568 | #include <trace/define_trace.h> | |
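
/*
 * trace/define_trace.h turns the event descriptions above into real
 * tracepoint definitions when CREATE_TRACE_POINTS is defined. Exactly
 * one .c file in the tree does that for these block events, along the
 * lines of:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/block.h>
 *
 * Every other includer just gets the trace_block_*() entry points.
 */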