#define DIR_NAME_SIZE 256
#define __UNUSED__ __attribute__((__unused__))
+#define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
+#define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
+
/* obtain the time of an event */
return count;
}
+/* FIXME : performance could be improved with a better design for this
+ * function */
LttFacility * ltt_trace_facility_by_id(LttTrace * trace, unsigned id)
{
LttFacility * facility = NULL;
for(i=0;i<trace->facility_number;i++){
LttFacility *iter_facility =
(LttFacility*) g_ptr_array_index(trace->facilities,i);
- if(id >= iter_facility->base_id &&
- id < iter_facility->base_id + iter_facility->event_number) {
+ if(unlikely(id >= iter_facility->base_id &&
+ id < iter_facility->base_id + iter_facility->event_number)) {
facility = iter_facility;
break;
}
+/* Seek tracefile t to the position described by ep.
+ * Fast paths : already at the position -> no-op ; same block already
+ * loaded -> updateTracefile() ; otherwise the target block is read from
+ * disk.  If ep carries a saved event offset (old_position), the per-event
+ * counters are restored directly and a single skipEvent() lands on the
+ * requested event ; otherwise events are replayed one by one (O(n)). */
void ltt_tracefile_seek_position(LttTracefile *t, const LttEventPosition *ep)
{
//if we are at the right place, just return
- if(t->which_block == ep->block_num && t->which_event == ep->event_num)
+ if(likely(t->which_block == ep->block_num && t->which_event == ep->event_num))
return;
- if(t->which_block == ep->block_num) updateTracefile(t);
+ if(likely(t->which_block == ep->block_num)) updateTracefile(t);
else readBlock(t,ep->block_num);
//event offset is available
- if(ep->old_position){
+ if(likely(ep->old_position)){
int err;
+ /* Restore the per-event state that was saved in the position
+ * (workaround for fast position seek). */
t->which_event = ep->event_num;
t->prev_event_time = ep->prev_event_time;
t->pre_cycle_count = ep->pre_cycle_count;
t->count = ep->count;
+ t->overflow_nsec = ep->overflow_nsec;
/* end of workaround */
//update the fields of the current event and go to the next event
err = skipEvent(t);
- if(err == ERANGE) g_error("event id is out of range\n");
+ if(unlikely(err == ERANGE)) g_error("event id is out of range\n");
return;
}
+ /* No saved offset : linear replay from the start of the block. */
g_warning("using slow O(n) tracefile seek position");
LttEvent event;
- while(t->which_event < ep->event_num) ltt_tracefile_read(t, &event);
+ while(likely(t->which_event < ep->event_num)) ltt_tracefile_read(t, &event);
return;
}
{
int err;
- if(t->cur_event_pos == t->buffer + t->block_size){
- if(t->which_block == t->block_number){
+ if(unlikely(t->cur_event_pos == t->buffer + t->block_size)){
+ if(unlikely(t->which_block == t->block_number)){
return NULL;
}
err = readBlock(t, t->which_block + 1);
- if(err)g_error("Can not read tracefile");
+ if(unlikely(err))g_error("Can not read tracefile");
}
event->event_id = (int)(*(guint16 *)(t->cur_event_pos));
- if(event->event_id == TRACE_TIME_HEARTBEAT)
+ if(unlikely(event->event_id == TRACE_TIME_HEARTBEAT))
t->cur_heart_beat_number++;
t->prev_event_time = t->current_event_time;
event->prev_event_time = t->prev_event_time;
event->pre_cycle_count = t->pre_cycle_count;
event->count = t->count;
+ event->overflow_nsec = t->overflow_nsec;
+
/* end of workaround */
//update the fields of the current event and go to the next event
err = skipEvent(t);
- if(err == ERANGE) g_error("event id is out of range\n");
+ if(unlikely(err == ERANGE)) g_error("event id is out of range\n");
return event;
}
return 0;
}
+/*****************************************************************************
+ *Function name
+ *    skipEvent_pre_read_cycles : go to the next event,
+ *       computing the current event's field offsets/size when needed.
+ *       The cycle counter bookkeeping is done by the caller
+ *       (ltt_tracefile_pre_read_cycles), not here.
+ *Input params
+ *    t  : tracefile
+ *return value
+ *    0      : success
+ *    ERANGE : event id is out of range
+ ****************************************************************************/
+
+int skipEvent_pre_read_cycles(LttTracefile * t)
+{
+  int evId;
+  void * evData;
+  LttEventType * evT;
+  LttField * rootFld;
+
+  /* Event id and payload of the current event. */
+  evId = (int)(*(guint16 *)(t->cur_event_pos));
+  evData = t->cur_event_pos + EVENT_HEADER_SIZE;
+
+  evT = ltt_trace_eventtype_get(t->trace,(unsigned)evId);
+
+  if(likely(evT)) rootFld = evT->root_field;
+  else return ERANGE;
+
+  if(likely(rootFld)){
+    //event has string/sequence or the last event is not the same event
+    if(likely((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
+                   && rootFld->field_fixed == 0)){
+      setFieldsOffset(t, evT, evData, t->trace);
+    }
+    /* Advance past header + payload of the current event. */
+    t->cur_event_pos += EVENT_HEADER_SIZE + rootFld->field_size;
+  }else t->cur_event_pos += EVENT_HEADER_SIZE;
+
+  /* NOTE : unlike skipEvent(), latest_block/latest_event, which_event and
+   * current_event_time are intentionally NOT updated here : this pre-read
+   * pass only walks the buffer to accumulate cycle counts. */
+
+  return 0;
+}
+
+
+
+
+/*****************************************************************************
+ *Function name
+ *    ltt_tracefile_pre_read_cycles :
+ *    read the current event, incrementing the block's cycle counter.
+ *    Used by readBlock to precalculate the total of cycles in a block.
+ *Input params
+ *    tf : tracefile
+ *Return value
+ *    FALSE : end of block reached (block end cycle count has been saved)
+ ****************************************************************************/
+
+gboolean ltt_tracefile_pre_read_cycles(LttTracefile *tf)
+{
+  int err;
+  LttCycleCount cycle_count; // 32 bit cycle count for the current event
+  guint16 evId;
+
+  evId = *(guint16 *)tf->cur_event_pos;
+
+  /* The on-disk counter is only 32 bits wide : read it and detect
+   * wrap-around by comparing with the previous event's raw count. */
+  cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
+
+  gboolean comp_count = cycle_count < tf->pre_cycle_count;
+  tf->pre_cycle_count = cycle_count;
+
+  if(unlikely(comp_count)) {
+    /* Wrapped */
+    tf->count++; //increment wrap count
+  }
+
+  /* Full 64 bit counter : low 32 bits from the event, high bits from the
+   * number of wrap-arounds seen so far in this block. */
+  tf->cur_cycle_count = cycle_count | ((LttCycleCount)tf->count << 32);
+
+  if(unlikely(evId == TRACE_BLOCK_START)){
+    /* Block start : counter baseline only, nothing more to do. */
+  }else if(unlikely(evId == TRACE_BLOCK_END)){
+    /* The goal of all this pre reading : save the block's total cycle
+     * count in its end descriptor. */
+    tf->a_block_end->cycle_count = tf->cur_cycle_count;
+
+    return FALSE;
+  }
+
+  //update the fields of the current event and go to the next event
+  err = skipEvent_pre_read_cycles(tf);
+  if(unlikely(err == ERANGE)) g_error("event id is out of range\n");
+
+  return TRUE;
+}
+
/****************************************************************************
*Function name
* readBlock : read a block from the file
off_t nbBytes;
guint32 lostSize;
- if(whichBlock - tf->which_block == 1 && tf->which_block != 0){
+ /* same block already opened requested */
+ if((guint)whichBlock == tf->which_block) return 0;
+
+ if(likely(whichBlock - tf->which_block == 1 && tf->which_block != 0)){
tf->prev_block_end_time = tf->a_block_end->time;
tf->prev_event_time = tf->a_block_end->time;
}else{
}
nbBytes=lseek(tf->fd,(off_t)((whichBlock-1)*tf->block_size), SEEK_SET);
- if(nbBytes == -1) return EINVAL;
+ if(unlikely(nbBytes == -1)) return EINVAL;
- if(readFile(tf->fd,tf->buffer,tf->block_size,"Unable to read a block"))
+ if(unlikely(readFile(tf->fd,tf->buffer,tf->block_size,"Unable to read a block")))
return EIO;
tf->a_block_start=(BlockStart *) (tf->buffer + EVENT_HEADER_SIZE);
tf->which_event = 1;
tf->cur_event_pos = tf->buffer;//the beginning of the block, block start ev
tf->cur_heart_beat_number = 0;
+
+ /* read the whole block to precalculate total of cycles in it */
+ tf->count = 0;
+ tf->pre_cycle_count = 0;
+ tf->cur_cycle_count = 0;
+ //g_debug("precalculating cycles begin for block %i", whichBlock);
+ while(likely(ltt_tracefile_pre_read_cycles(tf)));
+ /* Rough approximation of cycles per usec to calculate
+ * the real block start and end time.
+ */
+ getCyclePerNsec(tf);
+ /* we are at end position, make end time more precise */
+ /* Start overflow_nsec to a negative value : takes account of the
+ * start of block cycle counter */
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+ /* put back the numbers corresponding to end time */
+ tf->overflow_nsec += tf->one_overflow_nsec * tf->count;
+
+ tf->a_block_end->time = getEventTime(tf);
+
+ //g_debug("precalculating cycles end for block %i", whichBlock);
+ /* put back pointer at the beginning */
+ tf->count = 0;
+ tf->pre_cycle_count = 0;
+ tf->cur_cycle_count = 0;
+ tf->which_event = 1;
+ tf->cur_event_pos = tf->buffer;//the beginning of the block, block start ev
+ tf->cur_heart_beat_number = 0;
+
+ /* Make start time more precise */
+ /* Start overflow_nsec to a negative value : takes account of the
+ * start of block cycle counter */
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+
+
+ tf->a_block_start->time = getEventTime(tf);
+
+ /* recalculate the cycles per nsec, with now more precise start and end time
+ */
getCyclePerNsec(tf);
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+
+
tf->current_event_time = getEventTime(tf);
tf->prev_event_time.tv_sec = 0;
tf->prev_event_time.tv_nsec = 0;
+ tf->count = 0;
+
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+
}
/*****************************************************************************
evT = ltt_trace_eventtype_get(t->trace,(unsigned)evId);
- if(evT) rootFld = evT->root_field;
+ if(likely(evT)) rootFld = evT->root_field;
else return ERANGE;
- if(rootFld){
+ if(likely(rootFld)){
//event has string/sequence or the last event is not the same event
- if((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
- && rootFld->field_fixed == 0){
+ if(likely((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
+ && rootFld->field_fixed == 0)){
setFieldsOffset(t, evT, evData, t->trace);
}
t->cur_event_pos += EVENT_HEADER_SIZE + rootFld->field_size;
evT->latest_event = t->which_event;
//the next event is in the next block
- if(evId == TRACE_BLOCK_END){
+ if(unlikely(evId == TRACE_BLOCK_END)){
t->cur_event_pos = t->buffer + t->block_size;
}else{
t->which_event++;
/*****************************************************************************
*Function name
* getCyclePerNsec : calculate cycles per nsec for current block
+ * MD: the whole block should have been read (tracefile_read) beforehand,
+ * so that the total number of cycles in it is known before this is called.
*Input Params
* t : tracefile
****************************************************************************/
lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
t->nsec_per_cycle = (double)lBufTotalNSec / (double)lBufTotalCycle;
+ /* Pre-multiply one overflow (2^32 cycles) by nsec_per_cycle */
+ t->one_overflow_nsec = t->nsec_per_cycle * (double)0x100000000ULL;
+
/* See : http://www.azillionmonkeys.com/qed/adiv.html */
// precalculate the reciprocal, so divisions will be really fast.
// 2^32-1 == 0xFFFFFFFFULL
{
LttTime time;
LttCycleCount cycle_count; // cycle count for the current event
- LttCycleCount lEventTotalCycle; // Total cycles from start for event
- LttCycleCount lEventNSec; // Total nsecs from start for event
+ //LttCycleCount lEventTotalCycle; // Total cycles from start for event
+ gint64 lEventNSec; // Total nsecs from start for event
LttTime lTimeOffset; // Time offset in struct LttTime
guint16 evId;
evId = *(guint16 *)tf->cur_event_pos;
- if(evId == TRACE_BLOCK_START){
- tf->count = 0;
- tf->pre_cycle_count = 0;
- tf->cur_cycle_count = tf->a_block_start->cycle_count;
- return tf->a_block_start->time;
- }else if(evId == TRACE_BLOCK_END){
- tf->count = 0;
- tf->pre_cycle_count = 0;
- tf->cur_cycle_count = tf->a_block_end->cycle_count;
- return tf->a_block_end->time;
+ //if(unlikely(evId == TRACE_BLOCK_START)){
+ // tf->count = 0;
+ // tf->pre_cycle_count = 0;
+ // tf->cur_cycle_count = tf->a_block_start->cycle_count;
+ // return tf->a_block_start->time;
+ //}//else if(unlikely(evId == TRACE_BLOCK_END)){
+ //tf->count = 0;
+ //tf->pre_cycle_count = 0;
+ //tf->cur_cycle_count = tf->a_block_end->cycle_count;
+ //return tf->a_block_end->time;
+ //}
+
+ // Calculate total time in cycles from start of buffer for this event
+ cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
+ //g_debug("event cycle count %llu", cycle_count);
+ //
+ //gint64 delta_count = (gint64)(cycle_count - tf->pre_cycle_count);
+ //LttCycleCount res_delta_count;
+ gboolean comp_count = cycle_count < tf->pre_cycle_count;
+ tf->pre_cycle_count = cycle_count;
+
+ if(unlikely(comp_count)) {
+ /* Wrapped */
+ tf->overflow_nsec += tf->one_overflow_nsec;
+ tf->count++; //increment overflow count
}
+ //if(unlikely(cycle_count < tf->pre_cycle_count)) tf->count++;
+ //if(unlikely(delta_count < 0)) {
+ // tf->count++; //increment wrap count
+ // keep in mind that delta_count is negative here.
+ // res_delta_count = delta_count + 0x100000000ULL ;
+ //} else
+ // res_delta_count = (LttCycleCount)delta_count;
+
+ //cycle_count += (LttCycleCount)tf->count << 32;
+
+ //FIXME (MD)
+ // if(tf->cur_heart_beat_number > tf->count)
+ // cycle_count += (tf->cur_heart_beat_number - tf->count) << 32;
+
+ //tf->cur_cycle_count = tf->cur_cycle_count + res_delta_count;
+ //
+ //
+ // Total cycle counter of the event.
+ //tf->cur_cycle_count = cycle_count | ((LttCycleCount)tf->count << 32);
+
+ //g_debug("cur cycle count %llu", tf->cur_cycle_count);
+
+ // Total number of cycles since the beginning of the block
+ //lEventTotalCycle = tf->cur_cycle_count
+ // - tf->a_block_start->cycle_count;
+
+
+
+#if 0
// Calculate total time in cycles from start of buffer for this event
cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
- if(cycle_count < tf->pre_cycle_count)tf->count++;
+ if(unlikely(cycle_count < tf->pre_cycle_count)) tf->count++;
tf->pre_cycle_count = cycle_count;
cycle_count += (LttCycleCount)tf->count << 32;
+ //FIXME (MD)
// if(tf->cur_heart_beat_number > tf->count)
- // cycle_count += tmpCycleCount * (tf->cur_heart_beat_number - tf->count);
+ // cycle_count += (tf->cur_heart_beat_number - tf->count) << 32;
tf->cur_cycle_count = cycle_count;
lEventTotalCycle = cycle_count;
lEventTotalCycle -= tf->a_block_start->cycle_count;
-
+#endif //0
// Convert it to nsecs
- lEventNSec = (double)lEventTotalCycle * (double)tf->nsec_per_cycle;
+ //lEventNSec = (double)lEventTotalCycle * (double)tf->nsec_per_cycle;
//lEventNSec = (tf->cycles_per_nsec_reciprocal * lEventTotalCycle) >> 16;
// Determine offset in struct LttTime
- lTimeOffset = ltt_time_from_double(lEventNSec);
-
+ //lTimeOffset = ltt_time_from_double(lEventNSec);
+ //
+ // We do not substract block start cycle count here, it has already been done
+ // on the overflow_nsec
+ // The result should never be negative, because the cycle count of
+ // the event following the block start should be >= the previous one.
+ lEventNSec = (gint64)((double)cycle_count * tf->nsec_per_cycle)
+ +tf->overflow_nsec;
+ //g_assert(lEventNSec >= 0);
+ lTimeOffset = ltt_time_from_uint64(lEventNSec);
+
time = ltt_time_add(tf->a_block_start->time, lTimeOffset);
return time;
LttField * rootFld = evT->root_field;
// rootFld->base_address = evD;
- if(rootFld)
+ if(likely(rootFld))
rootFld->field_size = getFieldtypeSize(tf, evT, 0,0,rootFld, evD,t);
}
int size, size1, element_number, i, offset1, offset2;
LttType * type = fld->field_type;
- if(t){
- if(evT->latest_block==t->which_block && evT->latest_event==t->which_event){
+ if(likely(t)){
+ if(unlikely(evT->latest_block==t->which_block &&
+ evT->latest_event==t->which_event)){
return fld->field_size;
}
}
- if(fld->field_fixed == 1){
+ if(likely(fld->field_fixed == 1)){
if(fld == evT->root_field) return fld->field_size;
}
case LTT_STRUCT:
element_number = (int) type->element_number;
size = 0;
- if(fld->field_fixed == -1){
+ if(fld->field_fixed == -1){
offset1 = offsetRoot;
offset2 = 0;
for(i=0;i<element_number;i++){