1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
4 * Complete rewrite from the original version made by XangXiu Yang.
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include <glib/gprintf.h>

#include "ltt-private.h"
#include <ltt/trace.h>
#include <ltt/event.h>
#include <ltt/ltt-types.h>
#include <ltt/marker.h>
54 #define DEFAULT_N_BLOCKS 32
57 extern long marker_update_fields_offsets(struct marker_info
*info
, const char *data
);
58 extern void marker_update_event_fields_offsets(GArray
*fields_offsets
,
59 struct marker_info
*info
);
61 /* Tracefile names used in this file */
63 GQuark LTT_TRACEFILE_NAME_METADATA
;
70 #define __UNUSED__ __attribute__((__unused__))
72 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
75 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
80 /* Those macros must be called from within a function where page_size is a known
82 #define PAGE_MASK (~(page_size-1))
83 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
/* set the offset of the fields belonging to the event,
   need the information of the architecture */
87 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
88 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
90 /* map a fixed size or a block information from the file (fd) */
91 static gint
map_block(LttTracefile
* tf
, guint block_num
);
93 /* calculate nsec per cycles for current block */
95 static guint32
calc_nsecs_per_cycle(LttTracefile
* t
);
96 static guint64
cycles_2_ns(LttTracefile
*tf
, guint64 cycles
);
99 /* go to the next event */
100 static int ltt_seek_next_event(LttTracefile
*tf
);
102 static int open_tracefiles(LttTrace
*trace
, gchar
*root_path
,
103 gchar
*relative_path
);
104 static int ltt_process_metadata_tracefile(LttTracefile
*tf
);
105 static void ltt_tracefile_time_span_get(LttTracefile
*tf
,
106 LttTime
*start
, LttTime
*end
);
107 static void group_time_span_get(GQuark name
, gpointer data
, gpointer user_data
);
108 static gint
map_block(LttTracefile
* tf
, guint block_num
);
109 static void ltt_update_event_size(LttTracefile
*tf
);
/* Event debugging flag: when non-zero, verbose dumps of event headers are
 * printed as events are decoded. Off by default. */
static int a_event_debug = 0;

/**
 * ltt_event_debug - enable or disable event debugging output.
 * @state: non-zero to enable, zero to disable.
 */
void ltt_event_debug(int state)
{
  a_event_debug = state;
}
121 * Return value : 0 success, 1 bad tracefile
123 static int parse_trace_header(ltt_subbuffer_header_t
*header
,
124 LttTracefile
*tf
, LttTrace
*t
)
126 if (header
->magic_number
== LTT_MAGIC_NUMBER
)
128 else if(header
->magic_number
== LTT_REV_MAGIC_NUMBER
)
130 else /* invalid magic number, bad tracefile ! */
134 t
->ltt_major_version
= header
->major_version
;
135 t
->ltt_minor_version
= header
->minor_version
;
136 t
->arch_size
= header
->arch_size
;
138 tf
->alignment
= header
->alignment
;
140 /* Get float byte order : might be different from int byte order
141 * (or is set to 0 if the trace has no float (kernel trace)) */
142 tf
->float_word_order
= 0;
144 switch(header
->major_version
) {
147 g_warning("Unsupported trace version : %hhu.%hhu",
148 header
->major_version
, header
->minor_version
);
152 switch(header
->minor_version
) {
155 struct ltt_subbuffer_header_2_6
*vheader
= header
;
156 tf
->buffer_header_size
= ltt_subbuffer_header_size();
159 tf
->tsc_mask
= ((1ULL << tf
->tscbits
) - 1);
160 tf
->tsc_mask_next_bit
= (1ULL << tf
->tscbits
);
163 t
->start_freq
= ltt_get_uint64(LTT_GET_BO(tf
),
164 &vheader
->start_freq
);
165 t
->freq_scale
= ltt_get_uint32(LTT_GET_BO(tf
),
166 &vheader
->freq_scale
);
167 t
->start_tsc
= ltt_get_uint64(LTT_GET_BO(tf
),
168 &vheader
->cycle_count_begin
);
169 t
->start_monotonic
= 0;
170 t
->start_time
.tv_sec
= ltt_get_uint64(LTT_GET_BO(tf
),
171 &vheader
->start_time_sec
);
172 t
->start_time
.tv_nsec
= ltt_get_uint64(LTT_GET_BO(tf
),
173 &vheader
->start_time_usec
);
174 t
->start_time
.tv_nsec
*= 1000; /* microsec to nanosec */
176 t
->start_time_from_tsc
=
177 ltt_time_from_uint64(tsc_to_uint64(t
->freq_scale
,
178 t
->start_freq
, t
->start_tsc
));
183 g_warning("Unsupported trace version : %hhu.%hhu",
184 header
->major_version
, header
->minor_version
);
189 g_warning("Unsupported trace version : %hhu.%hhu",
190 header
->major_version
, header
->minor_version
);
196 int get_block_offset_size(LttTracefile
*tf
, guint block_num
,
197 uint64_t *offset
, uint32_t *size
)
201 if (unlikely(block_num
>= tf
->num_blocks
))
204 offa
= g_array_index(tf
->buf_index
, uint64_t, block_num
);
205 if (likely(block_num
< tf
->num_blocks
- 1))
206 offb
= g_array_index(tf
->buf_index
, uint64_t, block_num
+ 1);
208 offb
= tf
->file_size
;
214 int ltt_trace_create_block_index(LttTracefile
*tf
)
216 int page_size
= getpagesize();
219 unsigned int header_map_size
= PAGE_ALIGN(ltt_subbuffer_header_size());
221 tf
->buf_index
= g_array_sized_new(FALSE
, TRUE
, sizeof(uint64_t),
224 g_assert(tf
->buf_index
->len
== i
);
226 while (offset
< tf
->file_size
) {
227 ltt_subbuffer_header_t
*header
;
230 tf
->buf_index
= g_array_set_size(tf
->buf_index
, i
+ 1);
231 off
= &g_array_index(tf
->buf_index
, uint64_t, i
);
234 /* map block header */
235 header
= mmap(0, header_map_size
, PROT_READ
,
236 MAP_PRIVATE
, tf
->fd
, (off_t
)offset
);
237 if(header
== MAP_FAILED
) {
238 perror("Error in allocating memory for buffer of tracefile");
242 /* read len, offset += len */
243 offset
+= ltt_get_uint32(LTT_GET_BO(tf
), &header
->sb_size
);
245 /* unmap block header */
246 if(munmap(header
, header_map_size
)) {
247 g_warning("unmap size : %u\n", header_map_size
);
248 perror("munmap error");
258 /*****************************************************************************
260 * ltt_tracefile_open : open a trace file, construct a LttTracefile
262 * t : the trace containing the tracefile
263 * fileName : path name of the trace file
264 * tf : the tracefile structure
266 * : 0 for success, -1 otherwise.
267 ****************************************************************************/
269 static gint
ltt_tracefile_open(LttTrace
*t
, gchar
* fileName
, LttTracefile
*tf
)
271 struct stat lTDFStat
; /* Trace data file status */
272 ltt_subbuffer_header_t
*header
;
273 int page_size
= getpagesize();
276 tf
->long_name
= g_quark_from_string(fileName
);
278 tf
->fd
= open(fileName
, O_RDONLY
);
279 tf
->buf_index
= NULL
;
281 g_warning("Unable to open input data file %s\n", fileName
);
285 // Get the file's status
286 if(fstat(tf
->fd
, &lTDFStat
) < 0){
287 g_warning("Unable to get the status of the input data file %s\n", fileName
);
291 // Is the file large enough to contain a trace
292 if(lTDFStat
.st_size
<
293 (off_t
)(ltt_subbuffer_header_size())){
294 g_print("The input data file %s does not contain a trace\n", fileName
);
298 /* Temporarily map the buffer start header to get trace information */
299 /* Multiple of pages aligned head */
300 tf
->buffer
.head
= mmap(0,
301 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ
,
302 MAP_PRIVATE
, tf
->fd
, 0);
303 if(tf
->buffer
.head
== MAP_FAILED
) {
304 perror("Error in allocating memory for buffer of tracefile");
307 g_assert( ( (gulong
)tf
->buffer
.head
&(8-1) ) == 0); // make sure it's aligned.
309 header
= (ltt_subbuffer_header_t
*)tf
->buffer
.head
;
311 if(parse_trace_header(header
, tf
, NULL
)) {
312 g_warning("parse_trace_header error");
316 //store the size of the file
317 tf
->file_size
= lTDFStat
.st_size
;
319 tf
->subbuf_corrupt
= 0;
321 if(munmap(tf
->buffer
.head
,
322 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
323 g_warning("unmap size : %zu\n",
324 PAGE_ALIGN(ltt_subbuffer_header_size()));
325 perror("munmap error");
328 tf
->buffer
.head
= NULL
;
330 /* Create block index */
331 ltt_trace_create_block_index(tf
);
333 //read the first block
334 if(map_block(tf
,0)) {
335 perror("Cannot map block for tracefile");
339 /* Create fields offset table */
340 tf
->event
.fields_offsets
= g_array_sized_new(FALSE
, FALSE
,
341 sizeof(struct LttField
), 1);
342 if (!tf
->event
.fields_offsets
)
349 if(munmap(tf
->buffer
.head
,
350 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
351 g_warning("unmap size : %zu\n",
352 PAGE_ALIGN(ltt_subbuffer_header_size()));
353 perror("munmap error");
360 g_array_free(tf
->buf_index
, TRUE
);
365 /*****************************************************************************
367 * ltt_tracefile_close: close a trace file,
369 * t : tracefile which will be closed
370 ****************************************************************************/
372 static void ltt_tracefile_close(LttTracefile
*t
)
374 int page_size
= getpagesize();
376 if(t
->buffer
.head
!= NULL
)
377 if(munmap(t
->buffer
.head
, PAGE_ALIGN(t
->buffer
.size
))) {
378 g_warning("unmap size : %u\n",
379 PAGE_ALIGN(t
->buffer
.size
));
380 perror("munmap error");
386 g_array_free(t
->buf_index
, TRUE
);
387 g_array_free(t
->event
.fields_offsets
, TRUE
);
390 /****************************************************************************
391 * get_absolute_pathname
393 * return the unique pathname in the system
395 * MD : Fixed this function so it uses realpath, dealing well with
396 * forgotten cases (.. were not used correctly before).
398 ****************************************************************************/
399 void get_absolute_pathname(const gchar
*pathname
, gchar
* abs_pathname
)
401 abs_pathname
[0] = '\0';
403 if (realpath(pathname
, abs_pathname
) != NULL
)
407 /* error, return the original path unmodified */
408 strcpy(abs_pathname
, pathname
);
414 /* Search for something like : .*_.*
416 * The left side is the name, the right side is the number.
418 * Exclude flight- prefix.
421 static int get_tracefile_name_number(gchar
*raw_name
,
428 guint raw_name_len
= strlen(raw_name
);
429 gchar char_name
[PATH_MAX
];
437 for(i
= 0; i
< raw_name_len
-1;i
++) {
438 if(raw_name
[i
] != '/')
441 raw_name
= &raw_name
[i
];
442 raw_name_len
= strlen(raw_name
);
444 for(i
=raw_name_len
-1;i
>=0;i
--) {
445 if(raw_name
[i
] == '_') break;
447 if(i
==-1) { /* Either not found or name length is 0 */
448 /* This is a userspace tracefile */
449 strncpy(char_name
, raw_name
, raw_name_len
);
450 char_name
[raw_name_len
] = '\0';
451 *name
= g_quark_from_string(char_name
);
452 *num
= 0; /* unknown cpu */
453 for(i
=0;i
<raw_name_len
;i
++) {
454 if(raw_name
[i
] == '/') {
459 for(;i
<raw_name_len
;i
++) {
460 if(raw_name
[i
] == '/') {
465 for(;i
<raw_name_len
;i
++) {
466 if(raw_name
[i
] == '-') {
470 if(i
== raw_name_len
) return -1;
472 tmpptr
= &raw_name
[i
];
473 for(;i
<raw_name_len
;i
++) {
474 if(raw_name
[i
] == '.') {
479 *tid
= strtoul(tmpptr
, &endptr
, 10);
481 return -1; /* No digit */
482 if(*tid
== ULONG_MAX
)
483 return -1; /* underflow / overflow */
485 tmpptr
= &raw_name
[i
];
486 for(;i
<raw_name_len
;i
++) {
487 if(raw_name
[i
] == '.') {
492 *pgid
= strtoul(tmpptr
, &endptr
, 10);
494 return -1; /* No digit */
495 if(*pgid
== ULONG_MAX
)
496 return -1; /* underflow / overflow */
498 tmpptr
= &raw_name
[i
];
499 *creation
= strtoull(tmpptr
, &endptr
, 10);
501 return -1; /* No digit */
502 if(*creation
== G_MAXUINT64
)
503 return -1; /* underflow / overflow */
507 cpu_num
= strtol(raw_name
+underscore_pos
+1, &endptr
, 10);
509 if(endptr
== raw_name
+underscore_pos
+1)
510 return -1; /* No digit */
511 if(cpu_num
== LONG_MIN
|| cpu_num
== LONG_MAX
)
512 return -1; /* underflow / overflow */
514 if (!strncmp(raw_name
, "flight-", sizeof("flight-") - 1)) {
515 raw_name
+= sizeof("flight-") - 1;
516 underscore_pos
-= sizeof("flight-") - 1;
518 strncpy(char_name
, raw_name
, underscore_pos
);
519 char_name
[underscore_pos
] = '\0';
520 *name
= g_quark_from_string(char_name
);
529 GData
**ltt_trace_get_tracefiles_groups(LttTrace
*trace
)
531 return &trace
->tracefiles
;
535 void compute_tracefile_group(GQuark key_id
,
537 struct compute_tracefile_group_args
*args
)
542 for(i
=0; i
<group
->len
; i
++) {
543 tf
= &g_array_index (group
, LttTracefile
, i
);
545 args
->func(tf
, args
->func_args
);
550 static void ltt_tracefile_group_destroy(gpointer data
)
552 GArray
*group
= (GArray
*)data
;
557 destroy_marker_data(g_array_index (group
, LttTracefile
, 0).mdata
);
558 for(i
=0; i
<group
->len
; i
++) {
559 tf
= &g_array_index (group
, LttTracefile
, i
);
561 ltt_tracefile_close(tf
);
563 g_array_free(group
, TRUE
);
566 static __attribute__ ((__unused__
)) gboolean
ltt_tracefile_group_has_cpu_online(gpointer data
)
568 GArray
*group
= (GArray
*)data
;
572 for(i
=0; i
<group
->len
; i
++) {
573 tf
= &g_array_index (group
, LttTracefile
, i
);
581 /* Open each tracefile under a specific directory. Put them in a
582 * GData : permits to access them using their tracefile group pathname.
583 * i.e. access control/modules tracefile group by index :
586 * relative path is the path relative to the trace root
587 * root path is the full path
589 * A tracefile group is simply an array where all the per cpu tracefiles sit.
592 static int open_tracefiles(LttTrace
*trace
, gchar
*root_path
, gchar
*relative_path
)
594 DIR *dir
= opendir(root_path
);
595 struct dirent
*entry
;
596 struct stat stat_buf
;
598 struct marker_data
*mdata
;
600 gchar path
[PATH_MAX
];
605 gchar rel_path
[PATH_MAX
];
614 strncpy(path
, root_path
, PATH_MAX
-1);
615 path_len
= strlen(path
);
616 path
[path_len
] = '/';
618 path_ptr
= path
+ path_len
;
620 strncpy(rel_path
, relative_path
, PATH_MAX
-1);
621 rel_path_len
= strlen(rel_path
);
622 rel_path
[rel_path_len
] = '/';
624 rel_path_ptr
= rel_path
+ rel_path_len
;
626 while((entry
= readdir(dir
)) != NULL
) {
628 if(entry
->d_name
[0] == '.') continue;
630 strncpy(path_ptr
, entry
->d_name
, PATH_MAX
- path_len
);
631 strncpy(rel_path_ptr
, entry
->d_name
, PATH_MAX
- rel_path_len
);
633 ret
= stat(path
, &stat_buf
);
639 g_debug("Tracefile file or directory : %s\n", path
);
641 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
643 if(S_ISDIR(stat_buf
.st_mode
)) {
645 g_debug("Entering subdirectory...\n");
646 ret
= open_tracefiles(trace
, path
, rel_path
);
647 if(ret
< 0) continue;
648 } else if(S_ISREG(stat_buf
.st_mode
)) {
657 if(get_tracefile_name_number(rel_path
, &name
, &num
, &tid
, &pgid
, &creation
))
658 continue; /* invalid name */
660 g_debug("Opening file.\n");
661 if(ltt_tracefile_open(trace
, path
, &tmp_tf
)) {
662 g_info("Error opening tracefile %s", path
);
664 continue; /* error opening the tracefile : bad magic number ? */
667 g_debug("Tracefile name is %s and number is %u",
668 g_quark_to_string(name
), num
);
671 tmp_tf
.cpu_online
= 1;
672 tmp_tf
.cpu_num
= num
;
676 tmp_tf
.creation
= creation
;
677 group
= g_datalist_id_get_data(&trace
->tracefiles
, name
);
679 /* Elements are automatically cleared when the array is allocated.
680 * It makes the cpu_online variable set to 0 : cpu offline, by default.
682 group
= g_array_sized_new (FALSE
, TRUE
, sizeof(LttTracefile
), 10);
683 g_datalist_id_set_data_full(&trace
->tracefiles
, name
,
684 group
, ltt_tracefile_group_destroy
);
685 mdata
= allocate_marker_data();
687 g_error("Error in allocating marker data");
690 /* Add the per cpu tracefile to the named group */
691 unsigned int old_len
= group
->len
;
693 group
= g_array_set_size(group
, num
+1);
695 g_assert(group
->len
> 0);
697 mdata
= g_array_index (group
, LttTracefile
, 0).mdata
;
699 g_array_index (group
, LttTracefile
, num
) = tmp_tf
;
700 g_array_index (group
, LttTracefile
, num
).event
.tracefile
=
701 &g_array_index (group
, LttTracefile
, num
);
702 for (i
= 0; i
< group
->len
; i
++)
703 g_array_index (group
, LttTracefile
, i
).mdata
= mdata
;
713 /* Presumes the tracefile is already seeked at the beginning. It makes sense,
714 * because it must be done just after the opening */
715 static int ltt_process_metadata_tracefile(LttTracefile
*tf
)
720 err
= ltt_tracefile_read_seek(tf
);
721 if(err
== EPERM
) goto seek_error
;
722 else if(err
== ERANGE
) break; /* End of tracefile */
724 err
= ltt_tracefile_read_update_event(tf
);
725 if(err
) goto update_error
;
728 * It contains only core events :
730 * 1 : set_marker_format
732 if(tf
->event
.event_id
>= MARKER_CORE_IDS
) {
733 /* Should only contain core events */
734 g_warning("Error in processing metadata file %s, "
735 "should not contain event id %u.", g_quark_to_string(tf
->name
),
741 const char *channel_name
, *marker_name
, *format
;
743 guint8 int_size
, long_size
, pointer_size
, size_t_size
, alignment
;
745 switch((enum marker_id
)tf
->event
.event_id
) {
746 case MARKER_ID_SET_MARKER_ID
:
747 channel_name
= pos
= tf
->event
.data
;
748 pos
+= strlen(channel_name
) + 1;
750 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
751 channel_name
, marker_name
);
752 pos
+= strlen(marker_name
) + 1;
753 pos
+= ltt_align((size_t)pos
, sizeof(guint16
), tf
->alignment
);
754 id
= ltt_get_uint16(LTT_GET_BO(tf
), pos
);
755 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
756 channel_name
, marker_name
, id
);
757 pos
+= sizeof(guint16
);
758 int_size
= *(guint8
*)pos
;
759 pos
+= sizeof(guint8
);
760 long_size
= *(guint8
*)pos
;
761 pos
+= sizeof(guint8
);
762 pointer_size
= *(guint8
*)pos
;
763 pos
+= sizeof(guint8
);
764 size_t_size
= *(guint8
*)pos
;
765 pos
+= sizeof(guint8
);
766 alignment
= *(guint8
*)pos
;
767 pos
+= sizeof(guint8
);
768 marker_id_event(tf
->trace
,
769 g_quark_from_string(channel_name
),
770 g_quark_from_string(marker_name
),
771 id
, int_size
, long_size
,
772 pointer_size
, size_t_size
, alignment
);
774 case MARKER_ID_SET_MARKER_FORMAT
:
775 channel_name
= pos
= tf
->event
.data
;
776 pos
+= strlen(channel_name
) + 1;
778 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
779 channel_name
, marker_name
);
780 pos
+= strlen(marker_name
) + 1;
782 pos
+= strlen(format
) + 1;
783 marker_format_event(tf
->trace
,
784 g_quark_from_string(channel_name
),
785 g_quark_from_string(marker_name
),
787 /* get information from dictionary TODO */
790 g_warning("Error in processing metadata file %s, "
791 "unknown event id %hhu.",
792 g_quark_to_string(tf
->name
),
805 g_warning("An error occured in metadata tracefile parsing");
810 * Open a trace and return its LttTrace handle.
812 * pathname must be the directory of the trace
815 LttTrace
*ltt_trace_open(const gchar
*pathname
)
817 gchar abs_path
[PATH_MAX
];
823 ltt_subbuffer_header_t
*header
;
825 struct dirent
*entry
;
826 struct stat stat_buf
;
827 gchar path
[PATH_MAX
];
829 t
= g_new(LttTrace
, 1);
830 if(!t
) goto alloc_error
;
832 get_absolute_pathname(pathname
, abs_path
);
833 t
->pathname
= g_quark_from_string(abs_path
);
835 g_datalist_init(&t
->tracefiles
);
837 /* Test to see if it looks like a trace */
838 dir
= opendir(abs_path
);
843 while((entry
= readdir(dir
)) != NULL
) {
844 strcpy(path
, abs_path
);
846 strcat(path
, entry
->d_name
);
847 ret
= stat(path
, &stat_buf
);
855 /* Open all the tracefiles */
857 if(open_tracefiles(t
, abs_path
, "")) {
858 g_warning("Error opening tracefile %s", abs_path
);
862 /* Parse each trace metadata_N files : get runtime fac. info */
863 group
= g_datalist_id_get_data(&t
->tracefiles
, LTT_TRACEFILE_NAME_METADATA
);
865 g_warning("Trace %s has no metadata tracefile", abs_path
);
870 * Get the trace information for the metadata_0 tracefile.
871 * Getting a correct trace start_time and start_tsc is insured by the fact
872 * that no subbuffers are supposed to be lost in the metadata channel.
873 * Therefore, the first subbuffer contains the start_tsc timestamp in its
876 g_assert(group
->len
> 0);
877 tf
= &g_array_index (group
, LttTracefile
, 0);
878 header
= (ltt_subbuffer_header_t
*)tf
->buffer
.head
;
879 ret
= parse_trace_header(header
, tf
, t
);
882 t
->num_cpu
= group
->len
;
886 //ret = allocate_marker_data(t);
888 // g_error("Error in allocating marker data");
890 for(i
=0; i
<group
->len
; i
++) {
891 tf
= &g_array_index (group
, LttTracefile
, i
);
893 if(ltt_process_metadata_tracefile(tf
))
895 // goto metadata_error;
902 // destroy_marker_data(t);
904 g_datalist_clear(&t
->tracefiles
);
912 /* Open another, completely independant, instance of a trace.
914 * A read on this new instance will read the first event of the trace.
916 * When we copy a trace, we want all the opening actions to happen again :
917 * the trace will be reopened and totally independant from the original.
918 * That's why we call ltt_trace_open.
920 LttTrace
*ltt_trace_copy(LttTrace
*self
)
922 return ltt_trace_open(g_quark_to_string(self
->pathname
));
929 void ltt_trace_close(LttTrace
*t
)
931 g_datalist_clear(&t
->tracefiles
);
936 /*****************************************************************************
937 * Get the start time and end time of the trace
938 ****************************************************************************/
940 void ltt_tracefile_time_span_get(LttTracefile
*tf
,
941 LttTime
*start
, LttTime
*end
)
945 err
= map_block(tf
, 0);
947 g_error("Can not map block");
948 *start
= ltt_time_infinite
;
950 *start
= tf
->buffer
.begin
.timestamp
;
952 err
= map_block(tf
, tf
->num_blocks
- 1); /* Last block */
954 g_error("Can not map block");
955 *end
= ltt_time_zero
;
957 *end
= tf
->buffer
.end
.timestamp
;
959 g_assert(end
->tv_sec
<= G_MAXUINT
);
962 struct tracefile_time_span_get_args
{
968 static void group_time_span_get(GQuark name
, gpointer data
, gpointer user_data
)
970 struct tracefile_time_span_get_args
*args
=
971 (struct tracefile_time_span_get_args
*)user_data
;
973 GArray
*group
= (GArray
*)data
;
979 for(i
=0; i
<group
->len
; i
++) {
980 tf
= &g_array_index (group
, LttTracefile
, i
);
982 ltt_tracefile_time_span_get(tf
, &tmp_start
, &tmp_end
);
983 if(ltt_time_compare(*args
->start
, tmp_start
)>0) *args
->start
= tmp_start
;
984 if(ltt_time_compare(*args
->end
, tmp_end
)<0) *args
->end
= tmp_end
;
989 /* return the start and end time of a trace */
991 void ltt_trace_time_span_get(LttTrace
*t
, LttTime
*start
, LttTime
*end
)
993 LttTime min_start
= ltt_time_infinite
;
994 LttTime max_end
= ltt_time_zero
;
995 struct tracefile_time_span_get_args args
= { t
, &min_start
, &max_end
};
997 g_datalist_foreach(&t
->tracefiles
, &group_time_span_get
, &args
);
999 if(start
!= NULL
) *start
= min_start
;
1000 if(end
!= NULL
) *end
= max_end
;
1005 /* Seek to the first event in a tracefile that has a time equal or greater than
1006 * the time passed in parameter.
1008 * If the time parameter is outside the tracefile time span, seek to the first
1009 * event or if after, return ERANGE.
1011 * If the time parameter is before the first event, we have to seek specially to
1014 * If the time is after the end of the trace, return ERANGE.
1016 * Do a binary search to find the right block, then a sequential search in the
1017 * block to find the event.
1019 * In the special case where the time requested fits inside a block that has no
1020 * event corresponding to the requested time, the first event of the next block
1023 * IMPORTANT NOTE : // FIXME everywhere...
1025 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
1026 * you will jump over an event if you do.
1028 * Return value : 0 : no error, the tf->event can be used
1029 * ERANGE : time if after the last event of the trace
1030 * otherwise : this is an error.
1034 int ltt_tracefile_seek_time(LttTracefile
*tf
, LttTime time
)
1038 unsigned int block_num
, high
, low
;
1040 /* seek at the beginning of trace */
1041 err
= map_block(tf
, 0); /* First block */
1043 g_error("Can not map block");
1047 /* If the time is lower or equal the beginning of the trace,
1048 * go to the first event. */
1049 if(ltt_time_compare(time
, tf
->buffer
.begin
.timestamp
) <= 0) {
1050 ret
= ltt_tracefile_read(tf
);
1051 if(ret
== ERANGE
) goto range
;
1052 else if (ret
) goto fail
;
1053 goto found
; /* There is either no event in the trace or the event points
1054 to the first event in the trace */
1057 err
= map_block(tf
, tf
->num_blocks
- 1); /* Last block */
1059 g_error("Can not map block");
1063 /* If the time is after the end of the trace, return ERANGE. */
1064 if(ltt_time_compare(time
, tf
->buffer
.end
.timestamp
) > 0) {
1068 /* Binary search the block */
1069 high
= tf
->num_blocks
- 1;
1073 block_num
= ((high
-low
) / 2) + low
;
1075 err
= map_block(tf
, block_num
);
1077 g_error("Can not map block");
1081 /* We cannot divide anymore : this is what would happen if the time
1082 * requested was exactly between two consecutive buffers'end and start
1083 * timestamps. This is also what would happend if we didn't deal with out
1084 * of span cases prior in this function. */
1085 /* The event is right in the buffer!
1086 * (or in the next buffer first event) */
1088 ret
= ltt_tracefile_read(tf
);
1089 if(ret
== ERANGE
) goto range
; /* ERANGE or EPERM */
1090 else if(ret
) goto fail
;
1092 if(ltt_time_compare(time
, tf
->event
.event_time
) <= 0)
1096 } else if(ltt_time_compare(time
, tf
->buffer
.begin
.timestamp
) < 0) {
1097 /* go to lower part */
1098 high
= block_num
- 1;
1099 } else if(ltt_time_compare(time
, tf
->buffer
.end
.timestamp
) > 0) {
1100 /* go to higher part */
1101 low
= block_num
+ 1;
1102 } else {/* The event is right in the buffer!
1103 (or in the next buffer first event) */
1105 ret
= ltt_tracefile_read(tf
);
1106 if(ret
== ERANGE
) goto range
; /* ERANGE or EPERM */
1107 else if(ret
) goto fail
;
1109 if(ltt_time_compare(time
, tf
->event
.event_time
) <= 0)
1121 /* Error handling */
1123 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1124 g_quark_to_string(tf
->name
));
1128 /* Seek to a position indicated by an LttEventPosition
1131 int ltt_tracefile_seek_position(LttTracefile
*tf
, const LttEventPosition
*ep
)
1135 if(ep
->tracefile
!= tf
) {
1139 err
= map_block(tf
, ep
->block
);
1141 g_error("Can not map block");
1145 tf
->event
.offset
= ep
->offset
;
1147 /* Put back the event real tsc */
1148 tf
->event
.tsc
= ep
->tsc
;
1149 tf
->buffer
.tsc
= ep
->tsc
;
1151 err
= ltt_tracefile_read_update_event(tf
);
1154 /* deactivate this, as it does nothing for now
1155 err = ltt_tracefile_read_op(tf);
1162 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1163 g_quark_to_string(tf
->name
));
1168 * Convert a value in "TSC scale" to a value in nanoseconds
1170 guint64
tsc_to_uint64(guint32 freq_scale
, uint64_t start_freq
, guint64 tsc
)
1172 return (double) tsc
* NANOSECONDS_PER_SECOND
* freq_scale
/ start_freq
;
1175 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1178 LttTime
ltt_interpolate_time_from_tsc(LttTracefile
*tf
, guint64 tsc
)
1180 return ltt_time_from_uint64(tsc_to_uint64(tf
->trace
->freq_scale
,
1181 tf
->trace
->start_freq
, tf
->trace
->drift
* tsc
+
1182 tf
->trace
->offset
));
1185 /* Calculate the real event time based on the buffer boundaries */
1186 LttTime
ltt_interpolate_time(LttTracefile
*tf
, LttEvent
*event
)
1188 return ltt_interpolate_time_from_tsc(tf
, tf
->buffer
.tsc
);
1192 /* Get the current event of the tracefile : valid until the next read */
1193 LttEvent
*ltt_tracefile_get_event(LttTracefile
*tf
)
1200 /*****************************************************************************
1202 * ltt_tracefile_read : Read the next event in the tracefile
1207 * Returns 0 if an event can be used in tf->event.
1208 * Returns ERANGE on end of trace. The event in tf->event still can be used
1209 * (if the last block was not empty).
1210 * Returns EPERM on error.
1212 * This function does make the tracefile event structure point to the event
1213 * currently pointed to by the tf->event.
1215 * Note : you must call a ltt_tracefile_seek to the beginning of the trace to
1216 * reinitialize it after an error if you want results to be coherent.
1217 * It would be the case if a end of trace last buffer has no event : the end
1218 * of trace wouldn't be returned, but an error.
1219 * We make the assumption there is at least one event per buffer.
1220 ****************************************************************************/
1222 int ltt_tracefile_read(LttTracefile
*tf
)
1226 err
= ltt_tracefile_read_seek(tf
);
1228 err
= ltt_tracefile_read_update_event(tf
);
1231 /* deactivate this, as it does nothing for now
1232 err = ltt_tracefile_read_op(tf);
1239 int ltt_tracefile_read_seek(LttTracefile
*tf
)
1243 /* Get next buffer until we finally have an event, or end of trace */
1245 err
= ltt_seek_next_event(tf
);
1246 if(unlikely(err
== ENOPROTOOPT
)) {
1250 /* Are we at the end of the buffer ? */
1252 if(unlikely(tf
->buffer
.index
== tf
->num_blocks
-1)){ /* end of trace ? */
1255 /* get next block */
1256 err
= map_block(tf
, tf
->buffer
.index
+ 1);
1258 g_error("Can not map block");
1262 } else break; /* We found an event ! */
1268 /* do an operation when reading a new event */
1270 /* This function does nothing for now */
1272 int ltt_tracefile_read_op(LttTracefile
*tf
)
1278 /* do event specific operation */
1286 static void print_debug_event_header(LttEvent
*ev
, void *start_pos
, void *end_pos
)
1288 unsigned int offset
= 0;
1291 g_printf("Event header (tracefile %s offset %" PRIx64
"):\n",
1292 g_quark_to_string(ev
->tracefile
->long_name
),
1293 (uint64_t)ev
->tracefile
->buffer
.offset
+
1294 (long)start_pos
- (long)ev
->tracefile
->buffer
.head
);
1296 while (offset
< (long)end_pos
- (long)start_pos
) {
1297 g_printf("%8lx", (long)start_pos
- (long)ev
->tracefile
->buffer
.head
+ offset
);
1300 for (i
= 0; i
< 4 ; i
++) {
1301 for (j
= 0; j
< 4; j
++) {
1302 if (offset
+ ((i
* 4) + j
) <
1303 (long)end_pos
- (long)start_pos
)
1305 ((char*)start_pos
)[offset
+ ((i
* 4) + j
)]);
/* same as ltt_tracefile_read, but does not seek to the next event nor call
 * event specific operation.
 *
 * Decodes the event header found at the current tf->event.offset inside the
 * mapped sub-buffer: extracts the (possibly extended) event id, the event
 * size and the timestamp, then reconstructs the full 64-bit TSC from the
 * truncated per-event timestamp.  Finishes by letting ltt_update_event_size()
 * compute the payload size and field offsets.
 *
 * Returns 0 on success. */
int ltt_tracefile_read_update_event(LttTracefile *tf)
{
  void *pos;
  LttEvent *event;
  void *pos_aligned;
  guint16 packed_evid; /* event id reader from the 5 bits in header */

  event = &tf->event;
  pos = tf->buffer.head + event->offset;

  /* Read event header */

  /* Align the head */
  pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
  pos_aligned = pos;

  /* The first 32-bit word packs the event id in the top bits (above
   * tf->tscbits) and the truncated timestamp in the low tf->tsc_mask bits. */
  event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
  event->event_id = packed_evid = event->timestamp >> tf->tscbits;
  event->timestamp = event->timestamp & tf->tsc_mask;
  pos += sizeof(guint32);

  switch (packed_evid) {
  case 29: /* LTT_RFLAG_ID_SIZE_TSC */
    /* Extended header : 16-bit id, 16-bit size (with a 32-bit escape),
     * followed by a full aligned 64-bit TSC. */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    if (event->event_size == 0xFFFF) {
      /* 0xFFFF is an escape : the real size follows as 32 bits. */
      event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
      pos += sizeof(guint32);
    }
    pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
    tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
    pos += sizeof(guint64);
    break;
  case 30: /* LTT_RFLAG_ID_SIZE */
    /* Extended header : 16-bit id and 16-bit size, no extended TSC. */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    if (event->event_size == 0xFFFF) {
      event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
      pos += sizeof(guint32);
    }
    break;
  case 31: /* LTT_RFLAG_ID */
    /* Extended id only : size will be computed from the marker format. */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = G_MAXUINT;
    break;
  default:
    /* Compact event : the packed id is the final id; size computed later. */
    event->event_size = G_MAXUINT;
    break;
  }

  if (likely(packed_evid != 29)) {
    /* No extended timestamp : splice the truncated per-event timestamp into
     * the running buffer TSC, detecting wrap-around of the low bits. */
    if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
      tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
                            + tf->tsc_mask_next_bit)
                       | (guint64)event->timestamp;
    else
      tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
                       | (guint64)event->timestamp;
  }
  event->tsc = tf->buffer.tsc;

  event->event_time = ltt_interpolate_time(tf, event);

  /* NOTE(review): upstream guards this call with the a_event_debug flag —
   * guard reconstructed from extraction-damaged source, confirm. */
  if (a_event_debug)
    print_debug_event_header(event, pos_aligned, pos);

  event->data = pos;

  /*
   * Let ltt_update_event_size update event->data according to the largest
   * alignment within the payload.
   * Get the data size and update the event fields with the current
   * information. */
  ltt_update_event_size(tf);

  return 0;
}
/****************************************************************************
 *Function name
 *    map_block       : map a block from the file
 *Input Params
 *    lttdes          : ltt trace file
 *    whichBlock      : the block which will be read
 *return value
 *    EINVAL          : lseek fail
 *    EIO             : can not read from the file
 ****************************************************************************/
static gint map_block(LttTracefile * tf, guint block_num)
{
  int page_size = getpagesize();
  ltt_subbuffer_header_t *header;
  /* NOTE(review): declarations reconstructed from extraction-damaged
   * source; offset/size types must match get_block_offset_size(). */
  uint64_t offset;
  uint32_t size;
  int ret;

  g_assert(block_num < tf->num_blocks);

  /* Only one sub-buffer is mapped at a time : drop the previous mapping. */
  if(tf->buffer.head != NULL) {
    if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buffer.size))) {
      g_warning("unmap size : %u\n",
          PAGE_ALIGN(tf->buffer.size));
      perror("munmap error");
      g_assert(0);
    }
  }

  ret = get_block_offset_size(tf, block_num, &offset, &size);
  g_assert(!ret);

  g_debug("Map block %u, offset %llu, size %u\n", block_num,
      (unsigned long long)offset, (unsigned int)size);

  /* Multiple of pages aligned head */
  tf->buffer.head = mmap(0, (size_t)size, PROT_READ, MAP_PRIVATE,
      tf->fd, (off_t)offset);

  if(tf->buffer.head == MAP_FAILED) {
    perror("Error in allocating memory for buffer of tracefile");
    g_assert(0);
  }
  g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.

  tf->buffer.index = block_num;

  header = (ltt_subbuffer_header_t *)tf->buffer.head;

  /* Pull the sub-buffer bookkeeping out of its on-disk header, honouring the
   * trace byte order. */
  tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
      &header->cycle_count_begin);
  tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
      &header->cycle_count_end);
  tf->buffer.offset = offset;
  tf->buffer.size = ltt_get_uint32(LTT_GET_BO(tf),
      &header->sb_size); /* NOTE(review): field name reconstructed — confirm
                          * against ltt_subbuffer_header_t declaration. */
  tf->buffer.data_size = ltt_get_uint32(LTT_GET_BO(tf),
      &header->data_size);
  tf->buffer.tsc = tf->buffer.begin.cycle_count;
  tf->event.tsc = tf->buffer.tsc;
  tf->buffer.freq = tf->buffer.begin.freq;

  /* The size advertised by the index must match the header's own record. */
  g_assert(size == tf->buffer.size);
  g_assert(tf->buffer.data_size <= tf->buffer.size);

  if (tf->trace->start_freq)
  {
    tf->buffer.begin.freq = tf->trace->start_freq;
    tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
        tf->buffer.begin.cycle_count);
    tf->buffer.end.freq = tf->trace->start_freq;
    tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
        tf->buffer.end.cycle_count);
  }

  /* Make the current event point to the beginning of the buffer :
   * it means that the event read must get the first event. */
  tf->event.tracefile = tf;
  tf->event.block = block_num;
  tf->event.offset = 0;

  if (header->events_lost) {
    g_warning("%d events lost so far in tracefile %s at block %u",
        (guint)header->events_lost,
        g_quark_to_string(tf->long_name),
        block_num);
    tf->events_lost = header->events_lost;
  }
  if (header->subbuf_corrupt) {
    g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
        (guint)header->subbuf_corrupt,
        g_quark_to_string(tf->long_name),
        block_num);
    tf->subbuf_corrupt = header->subbuf_corrupt;
  }

  return 0;
}
/* Dump the current event's payload as a classic hex+ASCII listing, 16 bytes
 * per row: file-relative offset, four groups of four hex bytes, then the
 * printable-character view.  No-op when the event carries no payload. */
static void print_debug_event_data(LttEvent *ev)
{
  unsigned int offset = 0;
  int i, j;

  /* Nothing to show when both the header size and computed size are 0. */
  if (!max(ev->event_size, ev->data_size))
    return;

  g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
      g_quark_to_string(ev->tracefile->long_name),
      (uint64_t)ev->tracefile->buffer.offset
          + (long)ev->data - (long)ev->tracefile->buffer.head);

  while (offset < max(ev->event_size, ev->data_size)) {
    /* File-relative address of this 16-byte row. */
    g_printf("%8lx", (long)ev->data + offset
        - (long)ev->tracefile->buffer.head);
    g_printf("    ");
    /* Hex view : 4 groups of 4 bytes, blank-padded past the end. */
    for (i = 0; i < 4 ; i++) {
      for (j = 0; j < 4; j++) {
        if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
          g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
        else
          g_printf("  ");
      }
      g_printf(" ");
    }
    g_printf("    ");
    /* ASCII view : printable bytes as-is, '.' for the rest. */
    for (i = 0; i < 4; i++) {
      for (j = 0; j < 4; j++) {
        if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
          if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
            g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
          else
            g_printf(".");
        } else
          g_printf(" ");
      }
      g_printf(" ");
    }
    g_printf("\n");
    offset += 16;
  }
}
/* It will update the fields offsets too.
 *
 * Computes the payload size of the current event and stores it in
 * tf->event.data_size.  Metadata-channel events (marker id/format
 * declarations) have a hand-computed layout; all other events are sized from
 * their marker description.  Also aligns tf->event.data to the payload's
 * largest alignment and refreshes the per-tracefile field offsets.
 * Aborts (g_error) on a kernel/LTTV size mismatch. */
void ltt_update_event_size(LttTracefile *tf)
{
  off_t size = 0;
  struct marker_info *info;

  if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
    /* Metadata events have a fixed, hand-computed layout. */
    switch((enum marker_id)tf->event.event_id) {
      case MARKER_ID_SET_MARKER_ID:
        /* Layout : channel string, marker name string, then a guint16
         * (aligned) event id and five guint8 fields. */
        size = strlen((char*)tf->event.data) + 1;
        g_debug("marker %s id set", (char*)tf->event.data + size);
        size += strlen((char*)tf->event.data + size) + 1;
        size += ltt_align(size, sizeof(guint16), tf->alignment);
        size += sizeof(guint16);
        size += sizeof(guint8);
        size += sizeof(guint8);
        size += sizeof(guint8);
        size += sizeof(guint8);
        size += sizeof(guint8);
        break;
      case MARKER_ID_SET_MARKER_FORMAT:
        /* Layout : channel, marker name and format — three C strings. */
        size = strlen((char*)tf->event.data) + 1;
        g_debug("marker %s format set", (char*)tf->event.data);
        size += strlen((char*)tf->event.data + size) + 1;
        size += strlen((char*)tf->event.data + size) + 1;
        break;
      default:
        /* NOTE(review): default arm reconstructed from extraction-damaged
         * source — confirm upstream behavior for other metadata ids. */
        break;
    }
  }

  info = marker_get_info_from_id(tf->mdata, tf->event.event_id);

  /* Core (reserved) marker ids must always be described. */
  if (tf->event.event_id >= MARKER_CORE_IDS)
    g_assert(info != NULL);

  /* Do not update field offsets of core markers when initially reading the
   * metadata tracefile when the infos about these markers do not exist yet.
   */
  if (likely(info && info->fields)) {
    /* Align the payload start on the marker's largest field alignment. */
    tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
        info->largest_align,
        tf->alignment);
    /* size, dynamically computed */
    if (info->size != -1)
      size = info->size;
    else
      size = marker_update_fields_offsets(info, tf->event.data);
    /* Update per-tracefile offsets */
    marker_update_event_fields_offsets(tf->event.fields_offsets, info);
  }

  tf->event.data_size = size;

  /* Check consistency between kernel and LTTV structure sizes */
  if(tf->event.event_size == G_MAXUINT) {
    /* Event size too big to fit in the event size field */
    tf->event.event_size = tf->event.data_size;
  }

  /* NOTE(review): upstream guards this dump with a_event_debug —
   * guard reconstructed, confirm. */
  if (a_event_debug)
    print_debug_event_data(&tf->event);

  if (tf->event.data_size != tf->event.event_size) {
    struct marker_info *info = marker_get_info_from_id(tf->mdata,
        tf->event.event_id);
    if (!info)
      /* NOTE(review): %hhu expects unsigned char but event_id is wider —
       * format spec mismatch inherited from original, verify. */
      g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
          g_quark_to_string(tf->name));
    g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
        g_quark_to_string(info->name),
        tf->event.event_size, tf->event.data_size);
  }
}
/* Take the tf current event offset and use the event id to figure out where is
 * the next event offset.
 *
 * This is an internal function not aiming at being used elsewhere : it will
 * not jump over the current block limits. Please consider using
 * ltt_tracefile_read to do this.
 *
 * Returns 0 on success
 *         ERANGE if we are at the end of the buffer.
 *         ENOPROTOOPT if an error occured when getting the current event size.
 */
static int ltt_seek_next_event(LttTracefile *tf)
{
  int ret = 0;
  void *pos;

  /* seek over the buffer header if we are at the buffer start */
  if(tf->event.offset == 0) {
    tf->event.offset += tf->buffer_header_size;

    /* An empty sub-buffer contains only its header. */
    if(tf->event.offset == tf->buffer.data_size) {
      ret = ERANGE;
    }
    goto found;
  }

  pos = tf->event.data;

  /* data_size must have been computed by ltt_update_event_size first. */
  if(tf->event.data_size < 0) goto error;

  pos += (size_t)tf->event.data_size;

  tf->event.offset = pos - tf->buffer.head;

  /* Landing exactly on data_size means the buffer is exhausted. */
  if(tf->event.offset == tf->buffer.data_size) {
    ret = ERANGE;
    goto found;
  }
  g_assert(tf->event.offset < tf->buffer.data_size);

found:
  return ret;

error:
  g_error("Error in ltt_seek_next_event for tracefile %s",
      g_quark_to_string(tf->name));
  return ENOPROTOOPT;
}
/*****************************************************************************
 *Function name
 *    ltt_get_int   : get an integer number
 *Input params
 *    reverse_byte_order: must we reverse the byte order ?
 *    size          : the size of the integer
 *    ptr           : the data pointer
 *Return value
 *    gint64        : a 64 bits integer
 ****************************************************************************/
gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
{
  gint64 val;

  switch(size) {
    case 1: val = *((gint8 *)data); break;
    case 2: val = ltt_get_int16(reverse_byte_order, data); break;
    case 4: val = ltt_get_int32(reverse_byte_order, data); break;
    case 8: val = ltt_get_int64(reverse_byte_order, data); break;
    /* Unknown size : best-effort 64-bit read, but complain loudly. */
    default: val = ltt_get_int64(reverse_byte_order, data);
             g_critical("get_int : integer size %d unknown", size);
             break;
  }

  return val;
}
1713 /*****************************************************************************
1715 * ltt_get_uint : get an unsigned integer number
1717 * reverse_byte_order: must we reverse the byte order ?
1718 * size : the size of the integer
1719 * ptr : the data pointer
1721 * guint64 : a 64 bits unsigned integer
1722 ****************************************************************************/
1724 guint64
ltt_get_uint(gboolean reverse_byte_order
, gint size
, void *data
)
1729 case 1: val
= *((gint8
*)data
); break;
1730 case 2: val
= ltt_get_uint16(reverse_byte_order
, data
); break;
1731 case 4: val
= ltt_get_uint32(reverse_byte_order
, data
); break;
1732 case 8: val
= ltt_get_uint64(reverse_byte_order
, data
); break;
1733 default: val
= ltt_get_uint64(reverse_byte_order
, data
);
1734 g_critical("get_uint : unsigned integer size %d unknown",
1743 /* get the node name of the system */
1745 char * ltt_trace_system_description_node_name (LttSystemDescription
* s
)
1747 return s
->node_name
;
1751 /* get the domain name of the system */
1753 char * ltt_trace_system_description_domain_name (LttSystemDescription
* s
)
1755 return s
->domain_name
;
1759 /* get the description of the system */
1761 char * ltt_trace_system_description_description (LttSystemDescription
* s
)
1763 return s
->description
;
1767 /* get the NTP corrected start time of the trace */
1768 LttTime
ltt_trace_start_time(LttTrace
*t
)
1770 return t
->start_time
;
1773 /* get the monotonic start time of the trace */
1774 LttTime
ltt_trace_start_time_monotonic(LttTrace
*t
)
1776 return t
->start_time_from_tsc
;
1779 static __attribute__ ((__unused__
)) LttTracefile
*ltt_tracefile_new()
1782 tf
= g_new(LttTracefile
, 1);
1783 tf
->event
.tracefile
= tf
;
/* Release a tracefile allocated with ltt_tracefile_new().
 * NOTE(review): body reconstructed from extraction-damaged source — upstream
 * simply frees the structure; confirm no owned resources leak here. */
static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
{
  g_free(tf);
}
/* Shallow-copy one tracefile descriptor into another.
 * NOTE(review): body reconstructed from extraction-damaged source — upstream
 * performs a plain struct assignment; pointer members (buffer.head, event
 * data) are shared, not duplicated. Confirm. */
static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
{
  *dest = *src;
}
/* Before library loading...
 * Runs automatically before main() (GCC constructor attribute): interns the
 * "metadata" tracefile-name quark used throughout this file. */
static __attribute__((constructor)) void init(void)
{
  LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
}
/*****************************************************************************
 *Function name
 *    ltt_tracefile_open_header : based on ltt_tracefile_open but it stops
 *    when it gets the header
 *Input params
 *    fileName       : path to the tracefile
 *    tf             : the tracefile (metadata_0) where the header will be read
 *Return value
 *    ltt_subbuffer_header_t : the header containing the version number
 ****************************************************************************/
static ltt_subbuffer_header_t *ltt_tracefile_open_header(gchar *fileName, LttTracefile *tf)
{
  struct stat lTDFStat;    /* Trace data file status */
  ltt_subbuffer_header_t *header;
  int page_size = getpagesize();

  /* Open the file. */
  tf->long_name = g_quark_from_string(fileName);
  tf->fd = open(fileName, O_RDONLY);
  if(tf->fd < 0) {
    g_warning("Unable to open input data file %s\n", fileName);
    goto end;
  }

  /* Get the file's status */
  if(fstat(tf->fd, &lTDFStat) < 0){
    g_warning("Unable to get the status of the input data file %s\n", fileName);
    goto close_file;
  }

  /* Is the file large enough to contain a trace */
  if(lTDFStat.st_size < (off_t)(ltt_subbuffer_header_size())) {
    g_print("The input data file %s does not contain a trace\n", fileName);
    goto close_file;
  }

  /* Temporarily map the buffer start header to get trace information */
  /* Multiple of pages aligned head */
  tf->buffer.head = mmap(0,PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ, MAP_PRIVATE, tf->fd, 0);
  if(tf->buffer.head == MAP_FAILED) {
    perror("Error in allocating memory for buffer of tracefile");
    goto close_file;
  }
  g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.

  header = (ltt_subbuffer_header_t *)tf->buffer.head;

  return header;

  /* NOTE(review): error-path tail reconstructed from extraction-damaged
   * source — confirm the mapping is left for the caller to unmap on
   * success, and the fd closed only on failure. */
close_file:
  close(tf->fd);
end:
  return NULL;
}
1861 /*****************************************************************************
1863 * get_version : get the trace version from a metadata_0 trace file
1865 * pathname : path to the trace
1866 * version_number : the struct that will get the version number
1868 * int : 1 if succeed, -1 if error
1869 ****************************************************************************/
1870 int ltt_get_trace_version(const gchar
*pathname
, struct LttTraceVersion
*version_number
)
1872 gchar abs_path
[PATH_MAX
];
1875 struct dirent
*entry
;
1876 struct stat stat_buf
;
1877 gchar path
[PATH_MAX
];
1879 LttTracefile tmp_tf
;
1881 ltt_subbuffer_header_t
*header
;
1883 t
= g_new(LttTrace
, 1);
1885 get_absolute_pathname(pathname
, abs_path
);
1887 /* Test to see if it looks like a trace */
1888 dir
= opendir(abs_path
);
1895 while((entry
= readdir(dir
)) != NULL
) {
1896 strcpy(path
, abs_path
);
1898 strcat(path
, entry
->d_name
);
1899 ret
= stat(path
, &stat_buf
);
1907 dir
= opendir(abs_path
);
1909 while((entry
= readdir(dir
)) != NULL
) {
1910 if(entry
->d_name
[0] == '.') continue;
1911 if(g_strcmp0(entry
->d_name
, "metadata_0") != 0) continue;
1913 strcpy(path
, abs_path
);
1915 strcat(path
, entry
->d_name
);
1921 header
= ltt_tracefile_open_header(path
, &tmp_tf
);
1923 if(header
== NULL
) {
1924 g_info("Error getting the header %s", path
);
1925 continue; /* error opening the tracefile : bad magic number ? */
1928 version_number
->ltt_major_version
= header
->major_version
;
1929 version_number
->ltt_minor_version
= header
->minor_version
;