int page_size = getpagesize();
//open the file
- tf->name = g_quark_from_string(fileName);
+ tf->long_name = g_quark_from_string(fileName);
tf->trace = t;
tf->fd = open(fileName, O_RDONLY);
if(tf->fd < 0){
//store the size of the file
tf->file_size = lTDFStat.st_size;
- tf->block_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
- tf->num_blocks = tf->file_size / tf->block_size;
-
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
+ tf->num_blocks = tf->file_size / tf->buf_size;
+
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ perror("munmap error");
+ g_assert(0);
+ }
tf->buffer.head = NULL;
//read the first block
/* Error */
unmap_file:
- munmap(tf->buffer.head, PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ if(munmap(tf->buffer.head,
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(sizeof(struct ltt_block_start_header)));
+ perror("munmap error");
+ g_assert(0);
+ }
close_file:
close(tf->fd);
end:
return -1;
}
+/* Accessor: return the LttTrace this tracefile belongs to (tf->trace is
+ * set when the tracefile is opened). Does not transfer ownership. */
+LttTrace *ltt_tracefile_get_trace(LttTracefile *tf)
+{
+ return tf->trace;
+}
+
#if 0
/*****************************************************************************
*Open control and per cpu tracefiles
void ltt_tracefile_close(LttTracefile *t)
{
+ /* NOTE(review): PAGE_ALIGN presumably reads this local page_size —
+ * confirm against the macro definition. */
+ int page_size = getpagesize();
+
if(t->buffer.head != NULL)
- munmap(t->buffer.head, t->buf_size);
+ /* Unmap the full page-aligned mapping (mmap was page-aligned too, so
+ * the raw buf_size would be the wrong length). A munmap failure here
+ * means the mapping bookkeeping is corrupted: warn and abort. */
+ if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(t->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+
close(t->fd);
}
strncpy(char_name, raw_name, underscore_pos);
+ char_name[underscore_pos] = '\0';
+
*name = g_quark_from_string(char_name);
*num = cpu_num;
* A tracefile group is simply an array where all the per cpu tracefiles sits.
*/
-static int open_tracefiles(LttTrace *trace, char *root_path,
- char *relative_path)
+static int open_tracefiles(LttTrace *trace, gchar *root_path,
+ gchar *relative_path)
{
DIR *dir = opendir(root_path);
struct dirent *entry;
struct stat stat_buf;
int ret;
- char path[PATH_MAX];
+ gchar path[PATH_MAX];
int path_len;
- char *path_ptr;
+ gchar *path_ptr;
int rel_path_len;
- char rel_path[PATH_MAX];
- char *rel_path_ptr;
+ gchar rel_path[PATH_MAX];
+ gchar *rel_path_ptr;
LttTracefile tmp_tf;
if(dir == NULL) {
tmp_tf.cpu_online = 1;
tmp_tf.cpu_num = num;
+ tmp_tf.name = name;
group = g_datalist_id_get_data(&trace->tracefiles, name);
if(group == NULL) {
/* Open all the tracefiles */
g_datalist_init(&t->tracefiles);
- if(open_tracefiles(t, abs_path, ""))
+ if(open_tracefiles(t, abs_path, "")) {
+ g_warning("Error opening tracefile %s", abs_path);
goto open_error;
+ }
/* Prepare the facilities containers : array and mapping */
/* Array is zeroed : the "exists" field is set to false by default */
return tf->name;
}
+
+/* Accessor: return the CPU number associated with this per-cpu
+ * tracefile (parsed from the file name when the trace was opened). */
+guint ltt_tracefile_num(LttTracefile *tf)
+{
+ return tf->cpu_num;
+}
+
/*****************************************************************************
* Get the number of blocks in the tracefile
****************************************************************************/
* the time passed in parameter.
*
* If the time parameter is outside the tracefile time span, seek to the first
- * or the last event of the tracefile.
+ * event or if after, return ERANGE.
*
* If the time parameter is before the first event, we have to seek specially to
* there.
*
- * If the time is after the end of the trace, get the last event.
+ * If the time is after the end of the trace, return ERANGE.
*
* Do a binary search to find the right block, then a sequential search in the
* block to find the event.
* you will jump over an event if you do.
*
* Return value : 0 : no error, the tf->event can be used
+ * ERANGE : time if after the last event of the trace
* otherwise : this is an error.
*
* */
* go to the first event. */
if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
ret = ltt_tracefile_read(tf);
+ if(ret == ERANGE) goto range;
+ else if (ret) goto fail;
goto found; /* There is either no event in the trace or the event points
to the first event in the trace */
}
goto fail;
}
- /* If the time is after the end of the trace, get the last event. */
- if(ltt_time_compare(time, tf->buffer.end.timestamp) >= 0) {
- /* While the ltt_tracefile_read doesn't return ERANGE or EPERM,
- * continue reading.
- */
- while(1) {
- ret = ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
- else if(ret) goto fail;
- }
+ /* If the time is after the end of the trace, return ERANGE. */
+ if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
+ goto range;
}
/* Binary search the block */
* (or in the next buffer first event) */
while(1) {
ret = ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
+ if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
if(ltt_time_compare(time, tf->event.event_time) >= 0)
- break;
+ goto found;
}
- } if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
+ } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
/* go to lower part */
high = block_num;
} else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
} else {/* The event is right in the buffer!
(or in the next buffer first event) */
while(1) {
- ltt_tracefile_read(tf);
- if(ret == ERANGE) goto found; /* ERANGE or EPERM */
+ ret = ltt_tracefile_read(tf);
+ if(ret == ERANGE) goto range; /* ERANGE or EPERM */
else if(ret) goto fail;
if(ltt_time_compare(time, tf->event.event_time) >= 0)
found:
return 0;
+range:
+ return ERANGE;
/* Error handling */
fail:
g_assert(tf->trace->has_tsc);
time = ltt_time_from_uint64(
- (guint64)tf->buffer.tsc*tf->buffer.nsecs_per_cycle);
+ (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) *
+ tf->buffer.nsecs_per_cycle);
time = ltt_time_add(tf->buffer.begin.timestamp, time);
return time;
*Return value
*
* Returns 0 if an event can be used in tf->event.
- * Returns ERANGE on end of trace. The event in tf->event still can be used.
+ * Returns ERANGE on end of trace. The event in tf->event still can be used
+ * (if the last block was not empty).
* Returns EPERM on error.
*
* This function does make the tracefile event structure point to the event
g_assert(block_num < tf->num_blocks);
- if(tf->buffer.head != NULL)
- munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size));
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
/* Multiple of pages aligned head */
tf->buffer.head = mmap(0,
- PAGE_ALIGN(tf->block_size),
+ PAGE_ALIGN(tf->buf_size),
PROT_READ, MAP_PRIVATE, tf->fd,
- PAGE_ALIGN((off_t)tf->block_size * (off_t)block_num));
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
if(tf->buffer.head == MAP_FAILED) {
perror("Error in allocating memory for buffer of tracefile");
tf->buffer.begin.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->begin.timestamp);
tf->buffer.begin.timestamp.tv_nsec *= NSEC_PER_USEC;
+ g_debug("block %u begin : %lu.%lu", block_num,
+ tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.cycle_count);
tf->buffer.end.timestamp = ltt_get_time(LTT_GET_BO(tf),
&header->end.timestamp);
tf->buffer.end.timestamp.tv_nsec *= NSEC_PER_USEC;
+ g_debug("block %u end : %lu.%lu", block_num,
+ tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->end.cycle_count);
tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
/* FIXME
* eventually support variable buffer size : will need a partial pre-read of
* the headers to create an index when we open the trace... eventually. */
- g_assert(tf->block_size == ltt_get_uint32(LTT_GET_BO(tf),
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
&header->buf_size));
/* Now that the buffer is mapped, calculate the time interpolation for the
g_quark_to_string(tf->name));
goto event_type_error;
}
+
+ if(event_type->root_field)
+ size = get_field_type_size(tf, event_type,
+ 0, 0, event_type->root_field, tf->event.data);
+ else
+ size = 0;
- size = get_field_type_size(tf, event_type,
- 0, 0, event_type->root_field, tf->event.data);
- g_debug("Event root field : f.e %hhu.%hhu size %lu", tf->event.facility_id,
+ g_debug("Event root field : f.e %hhu.%hhu size %zd",
+ tf->event.facility_id,
tf->event.event_id, size);
}
if(tf->event.offset == 0) {
tf->event.offset += sizeof(struct ltt_block_start_header);
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
}
goto found;
tf->event.offset = pos - tf->buffer.head;
- if(tf->event.offset == tf->block_size - tf->buffer.lost_size) {
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
ret = ERANGE;
goto found;
}
LttCycleCount lBufTotalCycle;/* Total cycles for this buffer */
/* Calculate the total time for this buffer */
- lBufTotalTime = ltt_time_sub(
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.end.timestamp),
- ltt_get_time(LTT_GET_BO(tf), &tf->buffer.begin.timestamp));
+ lBufTotalTime = ltt_time_sub(tf->buffer.end.timestamp,
+ tf->buffer.begin.timestamp);
/* Calculate the total cycles for this bufffer */
- lBufTotalCycle = ltt_get_uint64(LTT_GET_BO(tf), &tf->buffer.end.cycle_count);
- lBufTotalCycle -= ltt_get_uint64(LTT_GET_BO(tf),
- &tf->buffer.begin.cycle_count);
+ lBufTotalCycle = tf->buffer.end.cycle_count;
+ lBufTotalCycle -= tf->buffer.begin.cycle_count;
/* Convert the total time to double */
lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
field->child[0]);
field->fixed_size = FIELD_VARIABLE;
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
break;
case LTT_STRING:
field->fixed_size = FIELD_VARIABLE;
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
break;
case LTT_ARRAY:
local_fixed_root = FIELD_VARIABLE;
&local_fixed_root, &local_fixed_parent,
field->child[0]);
field->fixed_size = field->child[0]->fixed_size;
- if(field->fixed_size == FIELD_FIXED)
+ if(field->fixed_size == FIELD_FIXED) {
field->field_size = type->element_number * field->child[0]->field_size;
- else
+ } else {
field->field_size = 0;
+ *fixed_root = FIELD_VARIABLE;
+ *fixed_parent = FIELD_VARIABLE;
+ }
break;
case LTT_STRUCT:
current_root_offset = field->offset_root;