+ int page_size = getpagesize();
+ struct ltt_block_start_header *header;
+
+ g_assert(block_num < tf->num_blocks);
+
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
+
+ /* Multiple of pages aligned head */
+ tf->buffer.head = mmap(0,
+ PAGE_ALIGN(tf->buf_size),
+ PROT_READ, MAP_PRIVATE, tf->fd,
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
+
+ if(tf->buffer.head == MAP_FAILED) {
+ perror("Error in allocating memory for buffer of tracefile");
+ g_assert(0);
+ goto map_error;
+ }
+ g_assert( ( (guint)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
+
+
+ tf->buffer.index = block_num;
+
+ header = (struct ltt_block_start_header*)tf->buffer.head;
+
+#if 0
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u begin : %lu.%lu", block_num,
+ // tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
+ tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.cycle_count);
+ tf->buffer.begin.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.freq);
+ if(tf->buffer.begin.freq == 0)
+ tf->buffer.begin.freq = tf->trace->start_freq;
+
+ tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.begin.cycle_count);
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.begin.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+#if 0
+
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u end : %lu.%lu", block_num,
+ // tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
+ tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.cycle_count);
+ tf->buffer.end.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.freq);
+ if(tf->buffer.end.freq == 0)
+ tf->buffer.end.freq = tf->trace->start_freq;
+
+ tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &header->lost_size);
+ tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.end.cycle_count);
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.end.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+ tf->buffer.tsc = tf->buffer.begin.cycle_count;
+ tf->event.tsc = tf->buffer.tsc;
+ tf->buffer.freq = tf->buffer.begin.freq;
+
+ /* FIXME
+ * eventually support variable buffer size : will need a partial pre-read of
+ * the headers to create an index when we open the trace... eventually. */
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
+ &header->buf_size));
+
+ /* Now that the buffer is mapped, calculate the time interpolation for the
+ * block. */
+
+// tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+ //tf->buffer.cyc2ns_scale = calc_nsecs_per_cycle(tf);
+
+ /* Make the current event point to the beginning of the buffer :
+ * it means that the event read must get the first event. */
+ tf->event.tracefile = tf;
+ tf->event.block = block_num;
+ tf->event.offset = 0;
+
+ return 0;
+
+map_error:
+ return -errno;
+
+}
+
+/* It will update the fields offsets too */
+/* ltt_update_event_size : compute the payload size of the event currently
+ * pointed to by tf->event and store it in tf->event.data_size.
+ *
+ * For events belonging to a known facility, the per-field dynamic offsets
+ * are computed as a side effect (compute_offsets). Events of the built-in
+ * core facility are sized here from their fixed structures, since the
+ * facility-control tracefile must be readable before any facility is loaded.
+ *
+ * Error paths (unknown facility / unknown event id) fall through to the
+ * labels at the bottom: the size recorded by the kernel in
+ * tf->event.event_size is used so the reader can skip over the event.
+ * A kernel size of 0xFFFF there is fatal, because 0xFFFF is the "too big
+ * to encode" sentinel and the true size cannot be recovered. */
+void ltt_update_event_size(LttTracefile *tf)
+{
+ off_t size = 0;
+ LttFacility *f = ltt_trace_get_facility_by_num(tf->trace,
+ tf->event.facility_id);
+ char *tscdata;
+
+ if(!f->exists) {
+ /* Specific handling of core events : necessary to read the facility control
+ * tracefile. */
+
+ if(likely(tf->event.facility_id == LTT_FACILITY_CORE)) {
+ switch((enum ltt_core_events)tf->event.event_id) {
+ case LTT_EVENT_FACILITY_LOAD:
+ /* Payload: NUL-terminated facility name, padding to guint32
+ * alignment, then a struct LttFacilityLoad. */
+ size = strlen((char*)tf->event.data) + 1;
+ //g_debug("Update Event facility load of facility %s", (char*)tf->event.data);
+ size += ltt_align(size, sizeof(guint32), tf->has_alignment);
+ size += sizeof(struct LttFacilityLoad);
+ break;
+ case LTT_EVENT_FACILITY_UNLOAD:
+ //g_debug("Update Event facility unload");
+ size = sizeof(struct LttFacilityUnload);
+ break;
+ case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
+ /* Same layout as FACILITY_LOAD: name, alignment padding, struct. */
+ size = strlen((char*)tf->event.data) + 1;
+ size += ltt_align(size, sizeof(guint32), tf->has_alignment);
+ //g_debug("Update Event facility load state dump of facility %s",
+ // (char*)tf->event.data);
+ size += sizeof(struct LttStateDumpFacilityLoad);
+ break;
+ case LTT_EVENT_HEARTBEAT:
+ //g_debug("Update Event heartbeat");
+ size = sizeof(TimeHeartbeat);
+ break;
+ case LTT_EVENT_HEARTBEAT_FULL:
+ //g_debug("Update Event heartbeat full");
+ /* A full heartbeat carries a 64-bit TSC in its payload: resync the
+ * event and buffer TSC from it, then recompute the event time. */
+ tscdata = (char*)(tf->event.data);
+ tf->event.tsc = ltt_get_uint64(LTT_GET_BO(tf), tscdata);
+ tf->buffer.tsc = tf->event.tsc;
+ tf->event.event_time = ltt_interpolate_time(tf, &tf->event);
+ size = sizeof(TimeHeartbeatFull);
+ size += ltt_align(size, sizeof(guint64), tf->has_alignment);
+ break;
+ default:
+ g_warning("Error in getting event size : tracefile %s, "
+ "unknown event id %hhu in core facility.",
+ g_quark_to_string(tf->name),
+ tf->event.event_id);
+ goto event_id_error;
+
+ }
+ goto no_offset; /* Skip the field computation */
+ } else {
+ g_warning("Unknown facility %hhu (0x%hhx) in tracefile %s",
+ tf->event.facility_id,
+ tf->event.facility_id,
+ g_quark_to_string(tf->name));
+ goto facility_error;
+ }
+ }
+
+ LttEventType *event_type =
+ ltt_facility_eventtype_get(f, tf->event.event_id);
+
+ if(!event_type) {
+ g_warning("Unknown event id %hhu in facility %s in tracefile %s",
+ tf->event.event_id,
+ g_quark_to_string(f->name),
+ g_quark_to_string(tf->name));
+ goto event_type_error;
+ }
+
+ /* Compute the dynamic offsets */
+ compute_offsets(tf, f, event_type, &size, tf->event.data);
+
+ //g_debug("Event root field : f.e %hhu.%hhu size %zd",
+ // tf->event.facility_id,
+ // tf->event.event_id, size);
+
+no_offset:
+ tf->event.data_size = size;
+
+ /* Check consistency between kernel and LTTV structure sizes */
+ if(tf->event.event_size == 0xFFFF) {
+ /* Event size too big to fit in the event size field */
+ tf->event.event_size = tf->event.data_size;
+ }
+ if (tf->event.data_size != tf->event.event_size) {
+ /* NOTE(review): g_error() logs at fatal level and aborts the program,
+ * so the exit(-1) below is unreachable — kept for clarity only. */
+ g_error("Kernel/LTTV event size differs for event %s.%s: kernel %u, LTTV %u",
+ g_quark_to_string(f->name), g_quark_to_string(event_type->name),
+ tf->event.event_size, tf->event.data_size);
+ exit(-1);
+ }
+ //g_assert(tf->event.data_size == tf->event.event_size);
+
+ return;
+
+facility_error:
+event_type_error:
+event_id_error:
+ if(tf->event.event_size == 0xFFFF) {
+ /* 0xFFFF means the kernel could not encode the real size, so there is
+ * no reliable way to skip this unknown event. Fatal. */
+ g_error("Cannot jump over an unknown event bigger than 0xFFFE bytes");
+ }
+ /* The facility is unknown : use the kernel information about this event
+ * to jump over it. */
+ tf->event.data_size = tf->event.event_size;
+}
+
+
+/* Take the tf current event offset and use the event facility id and event id
+ * to figure out where is the next event offset.
+ *
+ * This is an internal function not aiming at being used elsewhere : it will
+ * not jump over the current block limits. Please consider using
+ * ltt_tracefile_read to do this.
+ *
+ * Returns 0 on success
+ * ERANGE if we are at the end of the buffer.
+ * ENOPROTOOPT if an error occured when getting the current event size.
+ */
+static int ltt_seek_next_event(LttTracefile *tf)
+{
+ int ret = 0;
+ void *pos;
+
+ /* seek over the buffer header if we are at the buffer start */
+ if(tf->event.offset == 0) {
+ tf->event.offset += tf->buffer_header_size;
+
+ if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
+ ret = ERANGE;
+ }
+ goto found;
+ }