+ /* Multiple of pages aligned head */
+ tf->buffer.head = mmap(0, // map one buffer-sized, read-only window of the trace file
+ PAGE_ALIGN(tf->buf_size),
+ PROT_READ, MAP_PRIVATE, tf->fd,
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num)); // byte offset of block block_num; assumes buf_size is a page multiple -- TODO confirm
+
+ if(tf->buffer.head == MAP_FAILED) {
+ perror("Error in allocating memory for buffer of tracefile");
+ g_assert(0); // NOTE(review): aborts here unless G_DISABLE_ASSERT, making the goto below unreachable in debug builds
+ goto map_error;
+ }
+ g_assert( ( (guint)tf->buffer.head&(8-1) ) == 0); // 8-byte alignment check; NOTE(review): (guint) cast truncates pointers on 64-bit -- consider gsize/guintptr
+
+
+ tf->buffer.index = block_num; // remember which block is currently mapped
+
+ header = (struct ltt_block_start_header*)tf->buffer.head; // block-start header lives at the beginning of the mapping
+
+#if 0
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u begin : %lu.%lu", block_num,
+ // tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
+ tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf), // byte-order aware read of the block's begin TSC
+ &header->begin.cycle_count);
+ tf->buffer.begin.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.freq);
+ if(tf->buffer.begin.freq == 0)
+ tf->buffer.begin.freq = tf->trace->start_freq; // presumably 0 means "freq not recorded"; fall back to the trace-wide start freq
+
+ tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf, // wall-clock time derived from the begin TSC
+ tf->buffer.begin.cycle_count);
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.begin.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+#if 0
+
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u end : %lu.%lu", block_num,
+ // tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
+ tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.cycle_count);
+ tf->buffer.end.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.freq);
+ if(tf->buffer.end.freq == 0)
+ tf->buffer.end.freq = tf->trace->start_freq; // same fallback as for the begin freq
+
+ tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf), // presumably the unused byte count at the end of the block -- verify against writer
+ &header->lost_size);
+ tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.end.cycle_count);
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.end.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+ tf->buffer.tsc = tf->buffer.begin.cycle_count; // start the event clock at the block's begin TSC
+ tf->event.tsc = tf->buffer.tsc;
+ tf->buffer.freq = tf->buffer.begin.freq;
+
+ /* FIXME
+ * eventually support variable buffer size : will need a partial pre-read of
+ * the headers to create an index when we open the trace... eventually. */
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf), // fixed buf_size is required for the offset computation in the mmap above
+ &header->buf_size));
+
+ /* Now that the buffer is mapped, calculate the time interpolation for the
+ * block. */
+
+// tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+ //tf->buffer.cyc2ns_scale = calc_nsecs_per_cycle(tf);
+
+ /* Make the current event point to the beginning of the buffer :
+ * it means that the event read must get the first event. */
+ tf->event.tracefile = tf;
+ tf->event.block = block_num;
+ tf->event.offset = 0;
+
+ return 0;
+
+map_error:
+ return -errno; // NOTE(review): errno may have been clobbered by perror/g_assert since mmap failed -- TODO confirm
+
+}
+
+/* Update the current event's size; it will update the field offsets too. */
+void ltt_update_event_size(LttTracefile *tf)
+{
+ off_t size = 0; // computed payload size of the current event
+ LttFacility *f = ltt_trace_get_facility_by_num(tf->trace,
+ tf->event.facility_id);
+ char *tscdata;
+
+ if(!f->exists) {
+ /* Specific handling of core events : necessary to read the facility control
+ * tracefile. */
+
+ if(likely(tf->event.facility_id == LTT_FACILITY_CORE)) {
+ switch((enum ltt_core_events)tf->event.event_id) {
+ case LTT_EVENT_FACILITY_LOAD:
+ size = strlen((char*)tf->event.data) + 1; // NUL-terminated facility name precedes the fixed struct
+ //g_debug("Update Event facility load of facility %s", (char*)tf->event.data);
+ size += ltt_align(size, sizeof(guint32), tf->has_alignment); // pad to a 32-bit boundary before the struct
+ size += sizeof(struct LttFacilityLoad);
+ break;
+ case LTT_EVENT_FACILITY_UNLOAD:
+ //g_debug("Update Event facility unload");
+ size = sizeof(struct LttFacilityUnload);
+ break;
+ case LTT_EVENT_STATE_DUMP_FACILITY_LOAD:
+ size = strlen((char*)tf->event.data) + 1; // same layout as LTT_EVENT_FACILITY_LOAD: name string then struct
+ size += ltt_align(size, sizeof(guint32), tf->has_alignment);
+ //g_debug("Update Event facility load state dump of facility %s",
+ // (char*)tf->event.data);
+ size += sizeof(struct LttStateDumpFacilityLoad);
+ break;
+ case LTT_EVENT_HEARTBEAT:
+ //g_debug("Update Event heartbeat");
+ size = sizeof(TimeHeartbeat);
+ break;
+ case LTT_EVENT_HEARTBEAT_FULL:
+ //g_debug("Update Event heartbeat full");
+ tscdata = (char*)(tf->event.data);
+ tf->event.tsc = ltt_get_uint64(LTT_GET_BO(tf), tscdata); // event carries a full 64-bit TSC: resynchronize the time base
+ tf->buffer.tsc = tf->event.tsc;
+ tf->event.event_time = ltt_interpolate_time(tf, &tf->event);
+ size = sizeof(TimeHeartbeatFull);
+ size += ltt_align(size, sizeof(guint64), tf->has_alignment); // NOTE(review): alignment added AFTER the struct size here, unlike the cases above -- confirm intended
+ break;
+ default:
+ g_warning("Error in getting event size : tracefile %s, "
+ "unknown event id %hhu in core facility.",
+ g_quark_to_string(tf->name),
+ tf->event.event_id);
+ goto event_id_error;
+
+ }
+ goto no_offset; /* Skip the field computation */
+ } else {
+ g_warning("Unknown facility %hhu (0x%hhx) in tracefile %s",
+ tf->event.facility_id,
+ tf->event.facility_id,
+ g_quark_to_string(tf->name));
+ goto facility_error;
+ }
+ }
+
+ LttEventType *event_type =
+ ltt_facility_eventtype_get(f, tf->event.event_id);
+
+ if(!event_type) {
+ g_warning("Unknown event id %hhu in facility %s in tracefile %s",
+ tf->event.event_id,
+ g_quark_to_string(f->name),
+ g_quark_to_string(tf->name));
+ goto event_type_error;