static gint map_block(LttTracefile * tf, guint block_num);
/* calculate nsecs per cycle for the current block */
-static double calc_nsecs_per_cycle(LttTracefile * t);
+#if 0
+static guint32 calc_nsecs_per_cycle(LttTracefile * t);
+static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
+#endif //0
/* go to the next event */
static int ltt_seek_next_event(LttTracefile *tf);
sizeof(struct ltt_block_start_header)
+ sizeof(struct ltt_trace_header_0_4);
if(t) {
+ t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
+ &vheader->start_freq);
+ t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
+ &vheader->start_tsc);
t->start_monotonic = ltt_get_uint64(LTT_GET_BO(tf),
&vheader->start_monotonic);
t->start_time = ltt_get_time(LTT_GET_BO(tf),
g_assert(tf->trace->has_tsc);
- time = ltt_time_from_uint64(
- (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count) *
- tf->buffer.nsecs_per_cycle);
- time = ltt_time_add(tf->buffer.begin.timestamp, time);
+// time = ltt_time_from_uint64(
+// cycles_2_ns(tf, (guint64)(tf->buffer.tsc - tf->buffer.begin.cycle_count)));
+ time = ltt_time_from_uint64((tf->buffer.tsc - tf->trace->start_tsc) * 1000000
+ / (double)tf->trace->start_freq);
+ //time = ltt_time_add(tf->buffer.begin.timestamp, time);
+ time = ltt_time_add(tf->trace->start_time, time);
return time;
}
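+/* The event time above is a linear interpolation from the trace start,
+ * replacing the per-block nsecs_per_cycle scheme.  A minimal sketch of the
+ * computation, assuming start_freq is expressed in kHz (so that
+ * cycles * 1000000 / freq yields nanoseconds); interpolate_tsc_to_ns is a
+ * hypothetical helper, not part of the API.  Note that the guint64 product
+ * may overflow on very long traces, since the conversion to double only
+ * happens at the division. */
+#if 0
+static guint64 interpolate_tsc_to_ns(guint64 tsc, guint64 start_tsc,
+                                     guint64 start_freq)
+{
+  /* nanoseconds elapsed since trace start, with start_freq in kHz */
+  return (guint64)((tsc - start_tsc) * 1000000 / (double)start_freq);
+}
+#endif //0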
header = (struct ltt_block_start_header*)tf->buffer.head;
+#if 0
tf->buffer.begin.timestamp = ltt_time_add(
ltt_time_from_uint64(
ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.timestamp)
- tf->trace->start_monotonic),
tf->trace->start_time);
+#endif //0
//g_debug("block %u begin : %lu.%lu", block_num,
// tf->buffer.begin.timestamp.tv_sec, tf->buffer.begin.timestamp.tv_nsec);
tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.cycle_count);
tf->buffer.begin.freq = ltt_get_uint64(LTT_GET_BO(tf),
&header->begin.freq);
+ tf->buffer.begin.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ (tf->buffer.begin.cycle_count
+ - tf->trace->start_tsc) * 1000000
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time);
+#if 0
+
tf->buffer.end.timestamp = ltt_time_add(
ltt_time_from_uint64(
ltt_get_uint64(LTT_GET_BO(tf),
&header->end.timestamp)
- tf->trace->start_monotonic),
tf->trace->start_time);
-
+#endif //0
//g_debug("block %u end : %lu.%lu", block_num,
// tf->buffer.end.timestamp.tv_sec, tf->buffer.end.timestamp.tv_nsec);
  tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
                                              &header->end.cycle_count);
tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
&header->lost_size);
-
+ tf->buffer.end.timestamp = ltt_time_add(
+ ltt_time_from_uint64(
+ (tf->buffer.end.cycle_count
+ - tf->trace->start_tsc) * 1000000
+ / (double)tf->trace->start_freq),
+ tf->trace->start_time);
+
tf->buffer.tsc = tf->buffer.begin.cycle_count;
tf->event.tsc = tf->buffer.tsc;
tf->buffer.freq = tf->buffer.begin.freq;
/* Now that the buffer is mapped, calculate the time interpolation for the
* block. */
- tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+// tf->buffer.nsecs_per_cycle = calc_nsecs_per_cycle(tf);
+ //tf->buffer.cyc2ns_scale = calc_nsecs_per_cycle(tf);
/* Make the current event point to the beginning of the buffer :
* it means that the event read must get the first event. */
return ENOPROTOOPT;
}
-
+#if 0
/*****************************************************************************
*Function name
* calc_nsecs_per_cycle : calculate nsecs per cycle for current block
****************************************************************************/
/* from timer_tsc.c */
#define CYC2NS_SCALE_FACTOR 10
-static double calc_nsecs_per_cycle(LttTracefile * tf)
+static guint32 calc_nsecs_per_cycle(LttTracefile * tf)
{
//return 1e6 / (double)tf->buffer.freq;
- guint64 cpu_mhz = tf->buffer.freq / 1000;
- guint64 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
+ guint32 cpu_mhz = tf->buffer.freq / 1000;
+ guint32 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
- return cyc2ns_scale >> CYC2NS_SCALE_FACTOR;
+ return cyc2ns_scale;
// return 1e6 / (double)tf->buffer.freq;
}
+
+static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles)
+{
+ return (cycles * tf->buffer.cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+#endif //0
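+/* The fixed-point scheme kept above under #if 0 follows the kernel's
+ * timer_tsc.c: ns = cycles * (10^3 / cpu_mhz) is precomputed as a scale with
+ * CYC2NS_SCALE_FACTOR fractional bits, so converting costs one multiply and
+ * one shift per event instead of a division.  Worked example, assuming a
+ * hypothetical 2 GHz CPU (buffer.freq = 2000000 kHz, cpu_mhz = 2000):
+ *
+ *   cyc2ns_scale      = (1000 << 10) / 2000  = 512    (0.5 ns per cycle)
+ *   cycles_2_ns(1000) = (1000 * 512) >> 10   = 500 ns
+ */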
+
#if 0
void setFieldsOffset(LttTracefile *tf, LttEventType *evT,void *evD)
{
LTTV_STATE_EXIT,
LTTV_STATE_ZOMBIE,
LTTV_STATE_WAIT,
- LTTV_STATE_RUN;
+ LTTV_STATE_RUN,
+ LTTV_STATE_DEAD;
static GQuark
LTTV_STATE_TRACEFILES,
if(unlikely(process->state->s == LTTV_STATE_EXIT)) {
process->state->s = LTTV_STATE_ZOMBIE;
+ process->state->change = s->parent.timestamp;
} else {
if(unlikely(state_out == 0)) process->state->s = LTTV_STATE_WAIT_CPU;
else process->state->s = LTTV_STATE_WAIT;
- } /* FIXME : we do not remove process here, because the kernel
- * still has them : they may be zombies. We need to know
- * exactly when release_task is executed on the PID to
- * know when the zombie is destroyed.
- */
- //else
- // exit_process(s, process);
-
- process->state->change = s->parent.timestamp;
+ process->state->change = s->parent.timestamp;
+ }
+
+  if(state_out == 32)
+    exit_process(s, process); /* EXIT_DEAD, see sched.h for task states */
}
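+/* state_out is assumed to carry the task state bits from the 2.6 kernel's
+ * include/linux/sched.h, which is where the magic 32 comes from:
+ *
+ *   #define TASK_RUNNING         0
+ *   #define TASK_INTERRUPTIBLE   1
+ *   #define TASK_UNINTERRUPTIBLE 2
+ *   #define TASK_STOPPED         4
+ *   #define TASK_TRACED          8
+ *   #define EXIT_ZOMBIE          16
+ *   #define EXIT_DEAD            32
+ */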
process = ts->running_process[cpu] =
lttv_state_find_process_or_create(
 * in an SMP case where we don't have enough precision on the clocks.
*
* Test reenabled after precision fixes on time. (Mathieu) */
-
+#if 0
zombie_process = lttv_state_find_process(ts, ANY_CPU, child_pid);
if(unlikely(zombie_process != NULL)) {
exit_process(s, zombie_process);
}
-
+#endif //0
g_assert(process->pid != child_pid);
// FIXME : Add this test in the "known state" section
// g_assert(process->pid == parent_pid);
child_pid, &s->parent.timestamp);
} else {
/* The process has already been created : due to time imprecision between
- * multiple CPUs : it has been scheduled in before creation.
+ * multiple CPUs, it has been scheduled in before its creation event. Note
+ * that we should no longer have this kind of imprecision.
*
* Simply put a correct parent.
*/
+  g_assert(0); /* This is a problematic case : the process has been created
+                  before the fork event */
child_process->ppid = process->pid;
}
//Clearly due to time imprecision, we disable it. (Mathieu)
  //If this weird case happens, we have no choice but to set the
  //currently running process on the CPU to 0.
+ //I re-enable it following time precision fixes. (Mathieu)
+  //However, when a process that is still scheduled is freed by a process
+  //on another CPU, it is the schedchange event that drops the last
+  //reference count. Do not free it here!
guint num_cpus = ltt_trace_get_num_cpu(ts->parent.t);
guint i;
for(i=0; i< num_cpus; i++) {
//g_assert(process != ts->running_process[i]);
if(process == ts->running_process[i]) {
- ts->running_process[i] = lttv_state_find_process(ts, i, 0);
+ //ts->running_process[i] = lttv_state_find_process(ts, i, 0);
+ break;
}
}
- exit_process(s, process);
+ if(i == num_cpus) /* process is not scheduled */
+ exit_process(s, process);
}
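+/* The scan above leaves i == num_cpus only when the freed process is not in
+ * any running_process[] slot.  If it is still scheduled, exit_process() is
+ * deferred: per the comment above, the schedchange that takes it off the
+ * CPU drops the last reference count, so a process still pointed to by
+ * running_process[] is never freed here. */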
return FALSE;
LTTV_STATE_ZOMBIE = g_quark_from_string("zombie");
LTTV_STATE_WAIT = g_quark_from_string("wait for I/O");
LTTV_STATE_RUN = g_quark_from_string("running");
+ LTTV_STATE_DEAD = g_quark_from_string("dead");
LTTV_STATE_TRACEFILES = g_quark_from_string("tracefiles");
LTTV_STATE_PROCESSES = g_quark_from_string("processes");
LTTV_STATE_PROCESS = g_quark_from_string("process");