+ err = ltt_tracefile_read_seek(tf);
+ if(err) return err;
+ err = ltt_tracefile_read_update_event(tf);
+ if(err) return err;
+ err = ltt_tracefile_read_op(tf);
+ if(err) return err;
+
+ return 0;
+}
+
+/* Seek to the next event in the tracefile.
+ *
+ * Walks forward from the current position, mapping successive buffers
+ * until an event is found or the trace ends.
+ *
+ * Returns 0 on success, ERANGE at end of trace, EPERM on error. */
+int ltt_tracefile_read_seek(LttTracefile *tf)
+{
+  int ret;
+
+  /* Get next buffer until we finally have an event, or end of trace */
+  for(;;) {
+    ret = ltt_seek_next_event(tf);
+    if(unlikely(ret == ENOPROTOOPT))
+      return EPERM; /* could not determine the current event size */
+
+    if(ret != ERANGE)
+      break; /* We found an event ! */
+
+    /* End of the current buffer : is it also the end of the trace ? */
+    if(unlikely(tf->buffer.index == tf->num_blocks-1))
+      return ERANGE;
+
+    /* Not the last block : map the following one and retry. */
+    ret = map_block(tf, tf->buffer.index + 1);
+    if(unlikely(ret)) {
+      g_error("Can not map block");
+      return EPERM;
+    }
+  }
+
+  return 0;
+}
+
+
+/* do specific operation on events */
+int ltt_tracefile_read_op(LttTracefile *tf)
+{
+  LttEvent *event = &tf->event;
+
+  /* Event-specific processing hook : currently a no-op.
+   * Heartbeat accounting used to live here :
+   * //if(event->facility_id == LTT_FACILITY_CORE)
+   * //  if(event->event_id == LTT_EVENT_HEARTBEAT)
+   * //    tf->cur_heart_beat_number++;
+   */
+
+  return 0;
+}
+
+
+/* same as ltt_tracefile_read, but does not seek to the next event nor call
+ * event specific operation.
+ *
+ * Decodes the header of the event at the current tf->event.offset :
+ * - aligns the read head,
+ * - reads the timestamp (32 bit heartbeat-extended, compact, or full
+ *   64 bit tsc) and extends it to a full 64 bit tsc in tf->buffer.tsc,
+ * - reads the event id and size (except in compact mode, where they are
+ *   packed into the same 32 bit word as the timestamp),
+ * - points event->data at the payload and calls ltt_update_event_size().
+ *
+ * Always returns 0. */
+int ltt_tracefile_read_update_event(LttTracefile *tf)
+{
+  void * pos;
+  LttEvent *event;
+
+  event = &tf->event;
+  pos = tf->buffer.head + event->offset;
+
+  /* Read event header */
+
+  /* Align the head */
+  if(!tf->compact)
+    pos += ltt_align((size_t)pos, tf->trace->arch_size, tf->has_alignment);
+  else {
+    g_assert(tf->has_heartbeat);
+    pos += ltt_align((size_t)pos, sizeof(uint32_t), tf->has_alignment);
+  }
+
+  if(tf->has_heartbeat) {
+    event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
+    if(!tf->compact) {
+      /* 32 bits -> 64 bits tsc */
+      /* note : still works for seek and non seek cases. */
+      /* Fix : compact_data was previously cleared only in the
+       * no-overflow branch, leaving a stale value after a tsc wrap. */
+      event->compact_data = 0;
+      if(event->timestamp < (0xFFFFFFFFULL&tf->buffer.tsc)) {
+        /* The 32 bit counter wrapped : carry one into the upper half. */
+        tf->buffer.tsc = ((tf->buffer.tsc&0xFFFFFFFF00000000ULL)
+                            + 0x100000000ULL)
+                                | (guint64)event->timestamp;
+        event->tsc = tf->buffer.tsc;
+      } else {
+        /* no overflow */
+        tf->buffer.tsc = (tf->buffer.tsc&0xFFFFFFFF00000000ULL)
+                                | (guint64)event->timestamp;
+        event->tsc = tf->buffer.tsc;
+      }
+    } else {
+      /* Compact header : event id and payload bits share the 32 bit
+       * word with the truncated timestamp. */
+      /* We keep the LSB of the previous timestamp, to make sure
+       * we never go back */
+      event->event_id = event->timestamp >> tf->tscbits;
+      event->event_id = event->event_id
+                          & ((1 << tf->trace->compact_event_bits) - 1);
+      event->compact_data = event->timestamp >>
+                              (tf->trace->compact_event_bits + tf->tscbits);
+      /* Put the compact data back in original endianness */
+      event->compact_data = ltt_get_uint32(LTT_GET_BO(tf),
+                                           &event->compact_data);
+      event->event_size = 0xFFFF; /* real size computed later */
+      /* Rebuild the truncated tsc at its original bit position. */
+      event->timestamp = event->timestamp << tf->tsc_lsb_truncate;
+      event->timestamp = event->timestamp & tf->tsc_mask;
+      if(event->timestamp < (tf->tsc_mask&tf->buffer.tsc)) {
+        /* Truncated tsc wrapped : add the next bit above the mask. */
+        tf->buffer.tsc = ((tf->buffer.tsc&(~tf->tsc_mask))
+                            + tf->tsc_mask_next_bit)
+                                | (guint64)event->timestamp;
+        event->tsc = tf->buffer.tsc;
+      } else {
+        /* no overflow */
+        tf->buffer.tsc = (tf->buffer.tsc&(~tf->tsc_mask))
+                                | (guint64)event->timestamp;
+        event->tsc = tf->buffer.tsc;
+      }
+    }
+    pos += sizeof(guint32);
+  } else {
+    /* Full 64 bit tsc in the header : no extension needed. */
+    event->tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
+    tf->buffer.tsc = event->tsc;
+    event->compact_data = 0;
+    pos += sizeof(guint64);
+  }
+  event->event_time = ltt_interpolate_time(tf, event);
+
+  if(!tf->compact) {
+    /* NOTE(review): event_id is read without byte swapping while
+     * event_size goes through ltt_get_uint16 — looks inconsistent for
+     * cross-endian traces; confirm against the kernel-side writer. */
+    event->event_id = *(guint16*)pos;
+    pos += sizeof(guint16);
+
+    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
+    pos += sizeof(guint16);
+  } else {
+    /* Compact event : id and size were already extracted above. */
+  }
+  /* Align the head */
+  if(!tf->compact)
+    pos += ltt_align((size_t)pos, tf->trace->arch_size, tf->has_alignment);
+
+  event->data = pos;
+
+  /* get the data size and update the event fields with the current
+   * information. Also update the time if a heartbeat_full event is found. */
+  ltt_update_event_size(tf);
+
+  return 0;
+}
+
+
+/****************************************************************************
+ *Function name
+ * map_block : map a block from the file
+ *Input Params
+ * lttdes : ltt trace file
+ * whichBlock : the block which will be read
+ *return value
+ * 0 : success
+ * EINVAL : lseek fail
+ * EIO : can not read from the file
+ ****************************************************************************/
+
+gint map_block(LttTracefile * tf, guint block_num)
+{
+ /* page_size is presumably referenced by the PAGE_ALIGN() macro below —
+  * TODO confirm; otherwise it is an unused local. */
+ int page_size = getpagesize();
+ struct ltt_block_start_header *header;
+
+ g_assert(block_num < tf->num_blocks);
+
+ /* Unmap the previously mapped buffer, if any. */
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
+
+ /* Multiple of pages aligned head */
+ /* Map the requested block read-only; the file offset is the
+  * page-aligned product of the buffer size and the block number. */
+ tf->buffer.head = mmap(0,
+ PAGE_ALIGN(tf->buf_size),
+ PROT_READ, MAP_PRIVATE, tf->fd,
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
+
+ if(tf->buffer.head == MAP_FAILED) {
+ perror("Error in allocating memory for buffer of tracefile");
+ g_assert(0);
+ goto map_error;
+ }
+ /* NOTE(review): casting the pointer to guint truncates it on 64 bit
+  * platforms, but the low 3 bits survive the truncation so the 8-byte
+  * alignment check still works (at the cost of a compiler warning). */
+ g_assert( ( (guint)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
+
+
+ tf->buffer.index = block_num;
+
+ /* The block starts with a ltt_block_start_header holding the begin/end
+  * cycle counts, frequencies, lost size and buffer size. */
+ header = (struct ltt_block_start_header*)tf->buffer.head;
+
+#if 0
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u begin : %lu.%lu", block_num,
+ // tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
+ /* Decode the block-begin boundary : cycle count and frequency, falling
+  * back to the trace start frequency when the header records 0. */
+ tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.cycle_count);
+ tf->buffer.begin.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.freq);
+ if(tf->buffer.begin.freq == 0)
+ tf->buffer.begin.freq = tf->trace->start_freq;
+
+ tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.begin.cycle_count);
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.begin.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+#if 0
+
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u end : %lu.%lu", block_num,
+ // tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
+ /* Same decoding for the block-end boundary. */
+ tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.cycle_count);
+ tf->buffer.end.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.freq);
+ if(tf->buffer.end.freq == 0)
+ tf->buffer.end.freq = tf->trace->start_freq;
+
+ tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &header->lost_size);
+ tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.end.cycle_count);
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.end.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+ /* Reset the running tsc/freq to the block start values. */
+ tf->buffer.tsc = tf->buffer.begin.cycle_count;
+ tf->event.tsc = tf->buffer.tsc;
+ tf->buffer.freq = tf->buffer.begin.freq;
+
+ /* FIXME
+ * eventually support variable buffer size : will need a partial pre-read of
+ * the headers to create an index when we open the trace... eventually. */
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
+ &header->buf_size));
+
+ /* Now that the buffer is mapped, calculate the time interpolation for the
+ * block. */
+
+// tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+ //tf->buffer.cyc2ns_scale = calc_nsecs_per_cycle(tf);
+
+ /* Make the current event point to the beginning of the buffer :
+ * it means that the event read must get the first event. */
+ tf->event.tracefile = tf;
+ tf->event.block = block_num;
+ tf->event.offset = 0;
+
+ return 0;
+
+map_error:
+ /* NOTE(review): returns -errno here, while the header comment documents
+  * positive EINVAL/EIO — confirm which convention callers expect. */
+ return -errno;
+
+}
+
+/* ltt_update_event_size : compute the current event's payload size and
+ * store it in tf->event.data_size. It will update the fields offsets too.
+ *
+ * Core markers (id set, format set, heartbeats) have hard-coded layouts;
+ * any other event gets its size from the marker info table (fixed size,
+ * or computed while updating the field offsets).
+ * A heartbeat_full (64 bit) event also resynchronizes the buffer tsc and
+ * the event time as a side effect.
+ * Aborts via g_error() when the size recorded by the kernel disagrees
+ * with the size computed here. */
+void ltt_update_event_size(LttTracefile *tf)
+{
+  off_t size = 0;
+  char *tscdata;
+  struct marker_info *info;
+
+  switch((enum marker_id)tf->event.event_id) {
+  case MARKER_ID_SET_MARKER_ID:
+    /* Payload : marker name string, then an aligned u16 id followed by
+     * five u8 format-description fields. */
+    size = strlen((char*)tf->event.data) + 1;
+    size += ltt_align(size, sizeof(guint16), tf->has_alignment);
+    size += sizeof(guint16);
+    size += sizeof(guint8);
+    size += sizeof(guint8);
+    size += sizeof(guint8);
+    size += sizeof(guint8);
+    size += sizeof(guint8);
+    break;
+  case MARKER_ID_SET_MARKER_FORMAT:
+    /* Payload : two consecutive NUL-terminated strings (name, format).
+     * Fix : the second strlen must start past the first string; the
+     * original measured the first string twice, miscounting whenever the
+     * two strings differ in length. */
+    size = strlen((char*)tf->event.data) + 1;
+    size += strlen((char*)tf->event.data + size) + 1;
+    break;
+  case MARKER_ID_HEARTBEAT_32:
+    /* 32 bit heartbeat : an aligned u32 timestamp, nothing else. */
+    size = ltt_align(size, sizeof(guint32), tf->has_alignment);
+    size += sizeof(guint32);
+    break;
+  case MARKER_ID_HEARTBEAT_64:
+    /* 64 bit heartbeat : carries a full tsc; resynchronize the buffer
+     * tsc and recompute the event time from it. */
+    tscdata = (char*)(tf->event.data);
+    tf->event.tsc = ltt_get_uint64(LTT_GET_BO(tf), tscdata);
+    tf->buffer.tsc = tf->event.tsc;
+    tf->event.event_time = ltt_interpolate_time(tf, &tf->event);
+    size = ltt_align(size, sizeof(guint64), tf->has_alignment);
+    size += sizeof(guint64);
+    break;
+  default:
+    info = marker_get_info_from_id(tf->trace, tf->event.event_id);
+    g_assert(info != NULL);
+    if (info->size != -1) {
+      size = info->size;
+    } else {
+      /* Reuse the info fetched above (the original performed a second,
+       * redundant marker_get_info_from_id() lookup). */
+      size = marker_update_fields_offsets(info, tf->event.data);
+    }
+    break;
+  }
+
+  tf->event.data_size = size;
+
+  /* Check consistency between kernel and LTTV structure sizes */
+  if(tf->event.event_size == 0xFFFF) {
+    /* Event size too big to fit in the event size field */
+    tf->event.event_size = tf->event.data_size;
+  }
+  if (tf->event.data_size != tf->event.event_size) {
+    struct marker_info *mismatch_info = marker_get_info_from_id(tf->trace,
+                                          tf->event.event_id);
+    /* g_error() aborts, so the exit() below is normally unreachable; it
+     * is kept as a safety net should logging be reconfigured. */
+    g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
+        g_quark_to_string(mismatch_info->name),
+        tf->event.event_size, tf->event.data_size);
+    exit(-1);
+  }
+}
+
+
+/* Take the tf current event offset and use the event facility id and event id
+ * to figure out where is the next event offset.
+ *
+ * This is an internal function not aiming at being used elsewhere : it will
+ * not jump over the current block limits. Please consider using
+ * ltt_tracefile_read to do this.
+ *
+ * Returns 0 on success
+ * ERANGE if we are at the end of the buffer.
+ * ENOPROTOOPT if an error occured when getting the current event size.
+ */
+int ltt_seek_next_event(LttTracefile *tf)
+{
+ int ret = 0;
+ void *pos;
+
+ /* seek over the buffer header if we are at the buffer start */
+ if(tf->event.offset == 0) {
+ tf->event.offset += tf->buffer_header_size;
+
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ ret = ERANGE;