+/* Map block number block_num of the tracefile into memory (read-only,
+ * private mmap of one fixed-size buffer) and decode the block start
+ * header: begin/end cycle counts and timer frequencies, interpolated
+ * begin/end timestamps, and the lost_size field.  Also resets tf->event
+ * to point at the first event of the newly mapped block.
+ *
+ * Returns 0 on success, -errno if the mmap failed.  Note that munmap and
+ * mmap failures also trip g_assert(0), so the -errno path is only
+ * reachable when assertions are compiled out (G_DISABLE_ASSERT). */
+static gint map_block(LttTracefile * tf, guint block_num)
+{
+ int page_size = getpagesize();
+ /* NOTE(review): page_size appears unused in this function unless the
+  * PAGE_ALIGN macro expands to a reference to it — verify. */
+ struct ltt_block_start_header *header;
+
+ g_assert(block_num < tf->num_blocks);
+
+ /* Release the previously mapped block, if any, before mapping the new
+  * one; only one block of this tracefile is mapped at a time. */
+ if(tf->buffer.head != NULL) {
+ if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
+ g_warning("unmap size : %u\n",
+ PAGE_ALIGN(tf->buf_size));
+ perror("munmap error");
+ g_assert(0);
+ }
+ }
+
+
+ /* Multiple of pages aligned head */
+ /* Map the block read-only.  The file offset is block_num times the
+  * (fixed) buffer size.
+  * NOTE(review): applying PAGE_ALIGN to that offset only lands on the
+  * block start if buf_size is itself a multiple of the page size —
+  * verify against the trace writer. */
+ tf->buffer.head = mmap(0,
+ PAGE_ALIGN(tf->buf_size),
+ PROT_READ, MAP_PRIVATE, tf->fd,
+ PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
+
+ if(tf->buffer.head == MAP_FAILED) {
+ perror("Error in allocating memory for buffer of tracefile");
+ g_assert(0);
+ goto map_error;
+ }
+ /* NOTE(review): casting the pointer to guint truncates it on LP64
+  * platforms, which can make this 8-byte alignment check unreliable
+  * there — a pointer-sized integer type would be safer.  Verify. */
+ g_assert( ( (guint)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
+
+
+ tf->buffer.index = block_num;
+
+ /* The block start header sits at the very beginning of the mapping. */
+ header = (struct ltt_block_start_header*)tf->buffer.head;
+
+/* Compiled-out former begin-timestamp computation, kept for reference. */
+#if 0
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u begin : %lu.%lu", block_num,
+ // tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
+ /* Decode the begin bound from the header, converting from the trace's
+  * byte order to host order (LTT_GET_BO). */
+ tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.cycle_count);
+ tf->buffer.begin.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->begin.freq);
+ /* A zero frequency in the header means "use the trace start frequency". */
+ if(tf->buffer.begin.freq == 0)
+ tf->buffer.begin.freq = tf->trace->start_freq;
+
+ /* Derive the begin wall-clock timestamp from the begin cycle count. */
+ tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.begin.cycle_count);
+/* Compiled-out former interpolation formula, kept for reference. */
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.begin.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+/* Compiled-out former end-timestamp computation, kept for reference. */
+#if 0
+
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.timestamp)
+ - tf->trace->start_monotonic),
+ tf->trace->start_time);
+#endif //0
+ //g_debug("block %u end : %lu.%lu", block_num,
+ // tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
+ /* Decode the end bound the same way as the begin bound above. */
+ tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.cycle_count);
+ tf->buffer.end.freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &header->end.freq);
+ if(tf->buffer.end.freq == 0)
+ tf->buffer.end.freq = tf->trace->start_freq;
+
+ /* Number of bytes at the end of the block that hold no events. */
+ tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
+ &header->lost_size);
+ tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
+ tf->buffer.end.cycle_count);
+/* Compiled-out former interpolation formula, kept for reference. */
+#if 0
+ ltt_time_add(
+ ltt_time_from_uint64(
+ (double)(tf->buffer.end.cycle_count
+ - tf->trace->start_tsc) * 1000000.0
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time_from_tsc);
+#endif //0
+ /* Seed the running TSC/frequency state from the block's begin bound. */
+ tf->buffer.tsc = tf->buffer.begin.cycle_count;
+ tf->event.tsc = tf->buffer.tsc;
+ tf->buffer.freq = tf->buffer.begin.freq;
+
+ /* FIXME
+ * eventually support variable buffer size : will need a partial pre-read of
+ * the headers to create an index when we open the trace... eventually. */
+ g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
+ &header->buf_size));
+
+ /* Now that the buffer is mapped, calculate the time interpolation for the
+ * block. */
+
+// tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+ //tf->buffer.cyc2ns_scale = calc_nsecs_per_cycle(tf);
+
+ /* Make the current event point to the beginning of the buffer :
+ * it means that the event read must get the first event. */
+ tf->event.tracefile = tf;
+ tf->event.block = block_num;
+ tf->event.offset = 0;
+
+ return 0;
+
+map_error:
+ /* NOTE(review): assumes errno still holds the mmap failure code here —
+  * POSIX does not guarantee perror() preserves errno; verify. */
+ return -errno;
+
+}
+
+/* It will update the fields offsets too */
+void ltt_update_event_size(LttTracefile *tf)