X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;ds=sidebyside;f=ltt%2Fbranches%2Fpoly%2Fltt%2Ftracefile.c;h=0d03c07d54b28efeb5962910b6fa562ed840e6c8;hb=e45551ac5b4d6b0da5a5688fa8f9945044dc1049;hp=1b7e5ff7dbd231285f87ee1d02f12e4b6f60e642;hpb=27304273a482c99620e992daf3826eb61208fd4b;p=lttv.git diff --git a/ltt/branches/poly/ltt/tracefile.c b/ltt/branches/poly/ltt/tracefile.c index 1b7e5ff7..0d03c07d 100644 --- a/ltt/branches/poly/ltt/tracefile.c +++ b/ltt/branches/poly/ltt/tracefile.c @@ -209,7 +209,7 @@ gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf) int page_size = getpagesize(); //open the file - tf->name = g_quark_from_string(fileName); + tf->long_name = g_quark_from_string(fileName); tf->trace = t; tf->fd = open(fileName, O_RDONLY); if(tf->fd < 0){ @@ -251,10 +251,16 @@ gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf) //store the size of the file tf->file_size = lTDFStat.st_size; - tf->block_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size); - tf->num_blocks = tf->file_size / tf->block_size; - - munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header))); + tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size); + tf->num_blocks = tf->file_size / tf->buf_size; + + if(munmap(tf->buffer.head, + PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) { + g_warning("unmap size : %u\n", + PAGE_ALIGN(sizeof(struct ltt_block_start_header))); + perror("munmap error"); + g_assert(0); + } tf->buffer.head = NULL; //read the first block @@ -267,13 +273,24 @@ gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf) /* Error */ unmap_file: - munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header))); + if(munmap(tf->buffer.head, + PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) { + g_warning("unmap size : %u\n", + PAGE_ALIGN(sizeof(struct ltt_block_start_header))); + perror("munmap error"); + g_assert(0); + } close_file: close(tf->fd); end: return -1; } +LttTrace *ltt_tracefile_get_trace(LttTracefile *tf) +{ + return tf->trace; +} + #if 0 /***************************************************************************** *Open control and per cpu tracefiles @@ -351,8 +368,16 @@ gint ltt_tracefile_open_control(LttTrace *t, gchar * control_name) void ltt_tracefile_close(LttTracefile *t) { + int page_size = getpagesize(); + if(t->buffer.head != NULL) - munmap(t->buffer.head, t->buf_size); + if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) { + g_warning("unmap size : %u\n", + PAGE_ALIGN(t->buf_size)); + perror("munmap error"); + g_assert(0); + } + close(t->fd); } @@ -537,6 +562,8 @@ int get_tracefile_name_number(const gchar *raw_name, strncpy(char_name, raw_name, underscore_pos); + char_name[underscore_pos] = '\0'; + *name = g_quark_from_string(char_name); *num = cpu_num; @@ -604,21 +631,21 @@ gboolean ltt_tracefile_group_has_cpu_online(gpointer data) * A tracefile group is simply an array where all the per cpu tracefiles sits. 
*/ -static int open_tracefiles(LttTrace *trace, char *root_path, - char *relative_path) +static int open_tracefiles(LttTrace *trace, gchar *root_path, + gchar *relative_path) { DIR *dir = opendir(root_path); struct dirent *entry; struct stat stat_buf; int ret; - char path[PATH_MAX]; + gchar path[PATH_MAX]; int path_len; - char *path_ptr; + gchar *path_ptr; int rel_path_len; - char rel_path[PATH_MAX]; - char *rel_path_ptr; + gchar rel_path[PATH_MAX]; + gchar *rel_path_ptr; LttTracefile tmp_tf; if(dir == NULL) { @@ -680,6 +707,7 @@ static int open_tracefiles(LttTrace *trace, char *root_path, tmp_tf.cpu_online = 1; tmp_tf.cpu_num = num; + tmp_tf.name = name; group = g_datalist_id_get_data(&trace->tracefiles, name); if(group == NULL) { @@ -958,8 +986,10 @@ LttTrace *ltt_trace_open(const gchar *pathname) /* Open all the tracefiles */ g_datalist_init(&t->tracefiles); - if(open_tracefiles(t, abs_path, "")) + if(open_tracefiles(t, abs_path, "")) { + g_warning("Error opening tracefile %s", abs_path); goto open_error; + } /* Prepare the facilities containers : array and mapping */ /* Array is zeroed : the "exists" field is set to false by default */ @@ -1195,6 +1225,12 @@ GQuark ltt_tracefile_name(LttTracefile *tf) return tf->name; } + +guint ltt_tracefile_num(LttTracefile *tf) +{ + return tf->cpu_num; +} + /***************************************************************************** * Get the number of blocks in the tracefile ****************************************************************************/ @@ -1293,7 +1329,7 @@ int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time) else if(ret) goto fail; if(ltt_time_compare(time, tf->event.event_time) >= 0) - break; + goto found; } } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) { @@ -1365,7 +1401,8 @@ LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event) g_assert(tf->trace->has_tsc); time = ltt_time_from_uint64( - (guint64)tf->buffer.tsc*tf->buffer.nsecs_per_cycle); + (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) * + tf->buffer.nsecs_per_cycle); time = ltt_time_add(tf->buffer.begin.timestamp, time); return time; @@ -1388,7 +1425,8 @@ LttEvent *ltt_tracefile_get_event(LttTracefile *tf) *Return value * * Returns 0 if an event can be used in tf->event. - * Returns ERANGE on end of trace. The event in tf->event still can be used. + * Returns ERANGE on end of trace. The event in tf->event still can be used + * (if the last block was not empty). * Returns EPERM on error. 
* * This function does make the tracefile event structure point to the event @@ -1557,14 +1595,21 @@ static gint map_block(LttTracefile * tf, guint block_num) g_assert(block_num < tf->num_blocks); - if(tf->buffer.head != NULL) - munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size)); + if(tf->buffer.head != NULL) { + if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) { + g_warning("unmap size : %u\n", + PAGE_ALIGN(tf->buf_size)); + perror("munmap error"); + g_assert(0); + } + } + /* Multiple of pages aligned head */ tf->buffer.head = mmap(0, - PAGE_ALIGN(tf->block_size), + PAGE_ALIGN(tf->buf_size), PROT_READ, MAP_PRIVATE, tf->fd, - PAGE_ALIGN((off_t)tf->block_size * (off_t)block_num)); + PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num)); if(tf->buffer.head == MAP_FAILED) { perror("Error in allocating memory for buffer of tracefile"); @@ -1581,11 +1626,15 @@ static gint map_block(LttTracefile * tf, guint block_num) tf->buffer.begin.timestamp = ltt_get_time(LTT_GET_BO(tf), &header->begin.timestamp); tf->buffer.begin.timestamp.tv_nsec *= NSEC_PER_USEC; + g_debug("block %u begin : %lu.%lu", block_num, + tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec); tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf), &header->begin.cycle_count); tf->buffer.end.timestamp = ltt_get_time(LTT_GET_BO(tf), &header->end.timestamp); tf->buffer.end.timestamp.tv_nsec *= NSEC_PER_USEC; + g_debug("block %u end : %lu.%lu", block_num, + tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec); tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf), &header->end.cycle_count); tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf), @@ -1597,7 +1646,7 @@ static gint map_block(LttTracefile * tf, guint block_num) /* FIXME * eventually support variable buffer size : will need a partial pre-read of * the headers to create an index when we open the trace... eventually. 
*/ - g_assert(tf->block_size == ltt_get_uint32(LTT_GET_BO(tf), + g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size)); /* Now that the buffer is mapped, calculate the time interpolation for the @@ -1683,7 +1732,8 @@ void ltt_update_event_size(LttTracefile *tf) else size = 0; - g_debug("Event root field : f.e %hhu.%hhu size %lu", tf->event.facility_id, + g_debug("Event root field : f.e %hhu.%hhu size %zd", + tf->event.facility_id, tf->event.event_id, size); } @@ -1722,7 +1772,7 @@ static int ltt_seek_next_event(LttTracefile *tf) if(tf->event.offset == 0) { tf->event.offset += sizeof(struct ltt_block_start_header); - if(tf->event.offset == tf->block_size - tf->buffer.lost_size) { + if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) { ret = ERANGE; } goto found; @@ -1737,7 +1787,7 @@ static int ltt_seek_next_event(LttTracefile *tf) tf->event.offset = pos - tf->buffer.head; - if(tf->event.offset == tf->block_size - tf->buffer.lost_size) { + if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) { ret = ERANGE; goto found; } @@ -1766,14 +1816,12 @@ static double calc_nsecs_per_cycle(LttTracefile * tf) LttCycleCount lBufTotalCycle;/* Total cycles for this buffer */ /* Calculate the total time for this buffer */ - lBufTotalTime = ltt_time_sub( - ltt_get_time(LTT_GET_BO(tf), &tf->buffer.end.timestamp), - ltt_get_time(LTT_GET_BO(tf), &tf->buffer.begin.timestamp)); + lBufTotalTime = ltt_time_sub(tf->buffer.end.timestamp, + tf->buffer.begin.timestamp); /* Calculate the total cycles for this bufffer */ - lBufTotalCycle = ltt_get_uint64(LTT_GET_BO(tf), &tf->buffer.end.cycle_count); - lBufTotalCycle -= ltt_get_uint64(LTT_GET_BO(tf), - &tf->buffer.begin.cycle_count); + lBufTotalCycle = tf->buffer.end.cycle_count; + lBufTotalCycle -= tf->buffer.begin.cycle_count; /* Convert the total time to double */ lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
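
A minimal, self-contained sketch of the page-aligned mmap()/checked munmap() pattern this patch applies in ltt_tracefile_open(), ltt_tracefile_close() and map_block(). page_align() and map_block_sketch() are illustrative names, not functions from tracefile.c; only the overall pattern mirrors the patch.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Round len up to a multiple of the page size before mapping or
 * unmapping, as the patch does for every trace buffer. */
static size_t page_align(size_t len)
{
  size_t page_size = (size_t)getpagesize();
  return (len + page_size - 1) & ~(page_size - 1);
}

/* Map block block_num of an already-open tracefile descriptor, unmapping
 * the previously mapped block first.  A failing munmap() is treated as a
 * fatal error, matching the g_warning()/perror()/g_assert(0) handling
 * added by the patch. */
void *map_block_sketch(int fd, void *old_head, size_t buf_size,
                       unsigned int block_num)
{
  size_t map_len = page_align(buf_size);
  void *head;

  if (old_head != NULL && munmap(old_head, map_len)) {
    fprintf(stderr, "unmap size : %zu\n", map_len);
    perror("munmap error");
    abort();
  }

  head = mmap(NULL, map_len, PROT_READ, MAP_PRIVATE, fd,
              (off_t)page_align(buf_size * (size_t)block_num));
  if (head == MAP_FAILED) {
    perror("Error in allocating memory for buffer of tracefile");
    return NULL;
  }
  return head;
}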
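
The added char_name[underscore_pos] = '\0' in get_tracefile_name_number() is needed because strncpy() does not NUL-terminate the destination when it copies exactly the requested number of bytes. A small standalone illustration; the variable names follow the patch, while the sample value "cpu_3" and the use of strcspn() to locate the underscore are invented for the example:

#include <stdio.h>
#include <string.h>

int main(void)
{
  const char raw_name[] = "cpu_3";                /* e.g. a per-cpu tracefile */
  size_t underscore_pos = strcspn(raw_name, "_"); /* offset of the '_', here 3 */
  char char_name[32];

  strncpy(char_name, raw_name, underscore_pos);
  /* Without this, char_name holds "cpu" with no terminating NUL. */
  char_name[underscore_pos] = '\0';

  printf("name part: %s\n", char_name);           /* prints "cpu" */
  return 0;
}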
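
The change to ltt_interpolate_time() computes the event time as an offset from the buffer start rather than scaling the raw TSC value from zero, using the per-buffer nsecs_per_cycle that calc_nsecs_per_cycle() now derives from buffer fields map_block() has already byte-swapped. A rough standalone sketch of the arithmetic; plain 64-bit nanosecond values stand in for LttTime, and all sample numbers are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* Invented values describing one mapped block. */
  uint64_t begin_ns     = 1000000000ULL; /* buffer begin timestamp (ns) */
  uint64_t end_ns       = 1000500000ULL; /* buffer end timestamp (ns)   */
  uint64_t begin_cycles = 2000000ULL;    /* buffer begin cycle count    */
  uint64_t end_cycles   = 3000000ULL;    /* buffer end cycle count      */
  uint64_t event_tsc    = 2400000ULL;    /* TSC sampled for the event   */

  /* calc_nsecs_per_cycle(): buffer duration divided by elapsed cycles. */
  double nsecs_per_cycle = (double)(end_ns - begin_ns) /
                           (double)(end_cycles - begin_cycles);

  /* ltt_interpolate_time() after the patch: scale the cycle offset from
   * the buffer start, then add it to the buffer start time. */
  uint64_t event_ns = begin_ns +
      (uint64_t)((double)(event_tsc - begin_cycles) * nsecs_per_cycle);

  printf("event time: %llu ns\n", (unsigned long long)event_ns);
  return 0;
}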