#define DIR_NAME_SIZE 256
#define __UNUSED__ __attribute__((__unused__))
+#define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
+#define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
+
+
+/* obtain the time of an event */
+
+static inline LttTime getEventTime(LttTracefile * tf);
+
+
/* set the offset of the fields belonging to the event,
need the information of the archecture */
void setFieldsOffset(LttTracefile *tf,LttEventType *evT,void *evD,LttTrace *t);
return count;
}
+/* FIXME : performances could be improved with a better design for this
+ * function */
+/* Look up the facility whose event id range [base_id, base_id+event_number)
+ * contains the requested id. Linear scan over the trace's facility array ;
+ * returns NULL when no facility matches. */
LttFacility * ltt_trace_facility_by_id(LttTrace * trace, unsigned id)
{
-  LttFacility * facility;
+  LttFacility * facility = NULL;
unsigned int i;
+
for(i=0;i<trace->facility_number;i++){
-    facility = (LttFacility*) g_ptr_array_index(trace->facilities,i);
-    if(id >= facility->base_id &&
-       id < facility->base_id + facility->event_number)
+    LttFacility *iter_facility =
+      (LttFacility*) g_ptr_array_index(trace->facilities,i);
+    /* unlikely : most iterations do not match, hint for the branch predictor */
+    if(unlikely(id >= iter_facility->base_id &&
+        id < iter_facility->base_id + iter_facility->event_number)) {
+      facility = iter_facility;
break;
+    }
}
-  if(i==trace->facility_number) return NULL;
-  else return facility;
+
+  return facility;
}
LttEventType *ltt_trace_eventtype_get(LttTrace *t, unsigned evId)
+/* Seek the tracefile to the given event position.
+ * Fast path : when the position carries an old_position offset, restore the
+ * saved time/cycle counters and skip a single event ; otherwise fall back to
+ * an O(n) per-event read loop (with a warning). */
void ltt_tracefile_seek_position(LttTracefile *t, const LttEventPosition *ep)
{
//if we are at the right place, just return
-  if(t->which_block == ep->block_num && t->which_event == ep->event_num)
+  if(likely(t->which_block == ep->block_num && t->which_event == ep->event_num))
return;
-  if(t->which_block == ep->block_num) updateTracefile(t);
+  if(likely(t->which_block == ep->block_num)) updateTracefile(t);
else readBlock(t,ep->block_num);
//event offset is available
-  if(ep->old_position){
+  if(likely(ep->old_position)){
int err;
t->which_event = ep->event_num;
t->prev_event_time = ep->prev_event_time;
t->pre_cycle_count = ep->pre_cycle_count;
t->count = ep->count;
+    t->overflow_nsec = ep->overflow_nsec;
+    /* overflow_nsec is restored together with the cycle counters so the
+     * event time computation stays consistent after the seek */
/* end of workaround */
//update the fields of the current event and go to the next event
err = skipEvent(t);
-    if(err == ERANGE) g_error("event id is out of range\n");
+    if(unlikely(err == ERANGE)) g_error("event id is out of range\n");
return;
}
g_warning("using slow O(n) tracefile seek position");
LttEvent event;
-  while(t->which_event < ep->event_num) ltt_tracefile_read(t, &event);
+  while(likely(t->which_event < ep->event_num)) ltt_tracefile_read(t, &event);
return;
}
{
int err;
- if(t->cur_event_pos == t->buffer + t->block_size){
- if(t->which_block == t->block_number){
+ if(unlikely(t->cur_event_pos == t->buffer + t->block_size)){
+ if(unlikely(t->which_block == t->block_number)){
return NULL;
}
err = readBlock(t, t->which_block + 1);
- if(err)g_error("Can not read tracefile");
+ if(unlikely(err))g_error("Can not read tracefile");
}
event->event_id = (int)(*(guint16 *)(t->cur_event_pos));
- if(event->event_id == TRACE_TIME_HEARTBEAT)
+ if(unlikely(event->event_id == TRACE_TIME_HEARTBEAT))
t->cur_heart_beat_number++;
t->prev_event_time = t->current_event_time;
event->prev_event_time = t->prev_event_time;
event->pre_cycle_count = t->pre_cycle_count;
event->count = t->count;
+ event->overflow_nsec = t->overflow_nsec;
+
/* end of workaround */
//update the fields of the current event and go to the next event
err = skipEvent(t);
- if(err == ERANGE) g_error("event id is out of range\n");
+ if(unlikely(err == ERANGE)) g_error("event id is out of range\n");
return event;
}
return 0;
}
+/*****************************************************************************
+ *Function name
+ *    skipEvent_pre_read_cycles : go to the next event,
+ *          update the necessary fields of the current event
+ *          increment the cycle counter, save it at the end.
+ *Input params
+ *    t : tracefile
+ *return value
+ *    0 : success
+ *    ERANGE : event id is out of range
+ ****************************************************************************/
+
+int skipEvent_pre_read_cycles(LttTracefile * t)
+{
+  int evId;
+  void * evData;
+  LttEventType * evT;
+  LttField * rootFld;
+
+  /* event id is the first 16 bit word of the event header */
+  evId = (int)(*(guint16 *)(t->cur_event_pos));
+  evData = t->cur_event_pos + EVENT_HEADER_SIZE;
+
+  evT = ltt_trace_eventtype_get(t->trace,(unsigned)evId);
+
+  /* unknown event id : the event's size cannot be determined */
+  if(likely(evT)) rootFld = evT->root_field;
+  else return ERANGE;
+
+  if(likely(rootFld)){
+    //event has string/sequence or the last event is not the same event
+    if(likely((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
+        && rootFld->field_fixed == 0)){
+      setFieldsOffset(t, evT, evData, t->trace);
+    }
+    t->cur_event_pos += EVENT_HEADER_SIZE + rootFld->field_size;
+  }else t->cur_event_pos += EVENT_HEADER_SIZE;
+
+  /* NOTE(review): latest_block/latest_event are deliberately left untouched
+   * here (see the commented-out lines below), presumably so the normal read
+   * path still refreshes field offsets after pre-reading — confirm. */
+  //evT->latest_block = t->which_block;
+  //evT->latest_event = t->which_event;
+
+  //the next event is in the next block
+  //if(unlikely(evId == TRACE_BLOCK_END)){
+    // Specify end of buffer reached.
+  //  t->cur_event_pos = t->buffer + t->block_size;
+  //}else{
+    //g_critical("COUNT : %lu", t->cur_cycle_count);
+    //t->which_event++;
+   // t->current_event_time = getEventTime(t);
+  //}
+
+  return 0;
+}
+
+
+
+
+/*****************************************************************************
+ *Function name
+ *    ltt_tracefile_pre_read_cycles :
+ *    read the current event, increment the cycle counter
+ *Input params
+ *    t : tracefile
+ *Return value
+ *    FALSE : end of block reached
+ ****************************************************************************/
+
+gboolean ltt_tracefile_pre_read_cycles(LttTracefile *tf)
+{
+  int err;
+  LttCycleCount cycle_count; // 32 bit cycle count of the current event
+  guint16 evId;
+
+  evId = *(guint16 *)tf->cur_event_pos;
+
+  // Read the per event 32 bit cycle counter (it follows the event id).
+  cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
+
+  /* The counter is monotonic within a block : a value smaller than the
+   * previous event's means the 32 bit counter wrapped. */
+  gboolean comp_count = cycle_count < tf->pre_cycle_count;
+  tf->pre_cycle_count = cycle_count;
+
+  if(unlikely(comp_count)) {
+    /* Wrapped */
+    tf->count++; //increment wrap count
+  }
+
+  /* Rebuild the full 64 bit counter : low 32 bits come from the event,
+   * the upper bits from the number of wraps seen since the block start. */
+  tf->cur_cycle_count = cycle_count | ((LttCycleCount)tf->count << 32);
+
+  if(unlikely(evId == TRACE_BLOCK_START)){
+    //g_debug("BLOCK START");
+  }else if(unlikely(evId == TRACE_BLOCK_END)){
+    /* The goal of all this pre reading : remember the total cycle count
+     * reached at the end of the block. */
+    tf->a_block_end->cycle_count = tf->cur_cycle_count;
+
+    return FALSE;
+  }
+
+  //update the fields of the current event and go to the next event
+  err = skipEvent_pre_read_cycles(tf);
+  if(unlikely(err == ERANGE)) g_error("event id is out of range\n");
+
+  return TRUE;
+}
+
/****************************************************************************
*Function name
* readBlock : read a block from the file
off_t nbBytes;
guint32 lostSize;
- if(whichBlock - tf->which_block == 1 && tf->which_block != 0){
+ /* same block already opened requested */
+ if((guint)whichBlock == tf->which_block) return 0;
+
+ if(likely(whichBlock - tf->which_block == 1 && tf->which_block != 0)){
tf->prev_block_end_time = tf->a_block_end->time;
tf->prev_event_time = tf->a_block_end->time;
}else{
}
nbBytes=lseek(tf->fd,(off_t)((whichBlock-1)*tf->block_size), SEEK_SET);
- if(nbBytes == -1) return EINVAL;
+ if(unlikely(nbBytes == -1)) return EINVAL;
- if(readFile(tf->fd,tf->buffer,tf->block_size,"Unable to read a block"))
+ if(unlikely(readFile(tf->fd,tf->buffer,tf->block_size,"Unable to read a block")))
return EIO;
tf->a_block_start=(BlockStart *) (tf->buffer + EVENT_HEADER_SIZE);
tf->which_event = 1;
tf->cur_event_pos = tf->buffer;//the beginning of the block, block start ev
tf->cur_heart_beat_number = 0;
+
+ /* read the whole block to precalculate total of cycles in it */
+ tf->count = 0;
+ tf->pre_cycle_count = 0;
+ tf->cur_cycle_count = 0;
+ //g_debug("precalculating cycles begin for block %i", whichBlock);
+ while(likely(ltt_tracefile_pre_read_cycles(tf)));
+ /* Rough approximation of cycles per usec to calculate
+ * the real block start and end time.
+ */
+ getCyclePerNsec(tf);
+ /* we are at end position, make end time more precise */
+ /* Start overflow_nsec to a negative value : takes account of the
+ * start of block cycle counter */
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+ /* put back the numbers corresponding to end time */
+ tf->overflow_nsec += tf->one_overflow_nsec * tf->count;
+
+ tf->a_block_end->time = getEventTime(tf);
+
+ //g_debug("precalculating cycles end for block %i", whichBlock);
+
+ /* put back pointer at the beginning */
+ tf->count = 0;
+ tf->pre_cycle_count = 0;
+ tf->cur_cycle_count = 0;
+ tf->which_event = 1;
+ tf->cur_event_pos = tf->buffer;//the beginning of the block, block start ev
+ tf->cur_heart_beat_number = 0;
+
+ /* Make start time more precise */
+ /* Start overflow_nsec to a negative value : takes account of the
+ * start of block cycle counter */
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+
+
+ tf->a_block_start->time = getEventTime(tf);
+ /* recalculate the cycles per nsec, with now more precise start and end time
+ */
getCyclePerNsec(tf);
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+
+
tf->current_event_time = getEventTime(tf);
tf->prev_event_time.tv_sec = 0;
tf->prev_event_time.tv_nsec = 0;
+ tf->count = 0;
+
+ tf->overflow_nsec = (-((double)tf->a_block_start->cycle_count)
+ * tf->nsec_per_cycle);
+
}
/*****************************************************************************
evT = ltt_trace_eventtype_get(t->trace,(unsigned)evId);
- if(evT) rootFld = evT->root_field;
+ if(likely(evT)) rootFld = evT->root_field;
else return ERANGE;
- if(rootFld){
+ if(likely(rootFld)){
//event has string/sequence or the last event is not the same event
- if((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
- && rootFld->field_fixed == 0){
+ if(likely((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
+ && rootFld->field_fixed == 0)){
setFieldsOffset(t, evT, evData, t->trace);
}
t->cur_event_pos += EVENT_HEADER_SIZE + rootFld->field_size;
evT->latest_event = t->which_event;
//the next event is in the next block
- if(evId == TRACE_BLOCK_END){
+ if(unlikely(evId == TRACE_BLOCK_END)){
t->cur_event_pos = t->buffer + t->block_size;
}else{
t->which_event++;
/*****************************************************************************
 *Function name
 *    getCyclePerNsec : calculate cycles per nsec for current block
+ *    MD: should have tracefile_read the whole block, so we know the
+ *    total of cycles in it before being called.
 *Input Params
 *    t : tracefile
 ****************************************************************************/
void getCyclePerNsec(LttTracefile * t)
{
LttTime lBufTotalTime; /* Total time for this buffer */
-  LttCycleCount     lBufTotalNSec; /* Total time for this buffer in nsecs */
-  LttCycleCount     lBufTotalCycle;/* Total cycles for this buffer */
+  double lBufTotalNSec; /* Total time for this buffer in nsecs */
+  double lBufTotalCycle;/* Total cycles for this buffer */
+  /* NOTE(review): lBufTotalCycle's initial assignment (presumably from
+   * a_block_end->cycle_count) lies outside this hunk — confirm it is set
+   * before the subtraction below. */
/* Calculate the total time for this buffer */
lBufTotalTime = ltt_time_sub(t->a_block_end->time, t->a_block_start->time);
lBufTotalCycle -= t->a_block_start->cycle_count;
/* Convert the total time to nsecs */
-  lBufTotalNSec  = lBufTotalTime.tv_sec;
-  lBufTotalNSec *= NANOSECONDS_PER_SECOND;
-  lBufTotalNSec += lBufTotalTime.tv_nsec;
+  lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
-  t->cycle_per_nsec = (double)lBufTotalCycle / (double)lBufTotalNSec;
+  t->nsec_per_cycle = (double)lBufTotalNSec / (double)lBufTotalCycle;
+  /* Pre-multiply one overflow (2^32 cycles) by nsec_per_cycle, so that a
+   * counter wrap can later be accounted for with a single addition. */
+  t->one_overflow_nsec = t->nsec_per_cycle * (double)0x100000000ULL;
+
+  /* See : http://www.azillionmonkeys.com/qed/adiv.html */
+  // precalculate the reciprocal, so divisions will be really fast.
+  // 2^32-1 == 0xFFFFFFFFULL
+  //{
+  //  double int_res = lBufTotalCycle/lBufTotalNSec;
+  //  t->cycles_per_nsec_reciprocal =
+  //              ((0xFFFF+int_res)/int_res);
+  //}
+
}
/****************************************************************************
*Function name
* getEventTime : obtain the time of an event
+ * NOTE : this function _really_ is on critical path.
*Input params
* tf : tracefile
*Return value
* LttTime : the time of the event
****************************************************************************/
-LttTime getEventTime(LttTracefile * tf)
+static inline LttTime getEventTime(LttTracefile * tf)
{
LttTime time;
LttCycleCount cycle_count; // cycle count for the current event
- LttCycleCount lEventTotalCycle; // Total cycles from start for event
- LttCycleCount lEventNSec; // Total usecs from start for event
+ //LttCycleCount lEventTotalCycle; // Total cycles from start for event
+ gint64 lEventNSec; // Total nsecs from start for event
LttTime lTimeOffset; // Time offset in struct LttTime
guint16 evId;
- LttCycleCount tmpCycleCount = (((LttCycleCount)1)<<32);
evId = *(guint16 *)tf->cur_event_pos;
- if(evId == TRACE_BLOCK_START){
- tf->count = 0;
- tf->pre_cycle_count = 0;
- tf->cur_cycle_count = tf->a_block_start->cycle_count;
- return tf->a_block_start->time;
- }else if(evId == TRACE_BLOCK_END){
- tf->count = 0;
- tf->pre_cycle_count = 0;
- tf->cur_cycle_count = tf->a_block_end->cycle_count;
- return tf->a_block_end->time;
+ //if(unlikely(evId == TRACE_BLOCK_START)){
+ // tf->count = 0;
+ // tf->pre_cycle_count = 0;
+ // tf->cur_cycle_count = tf->a_block_start->cycle_count;
+ // return tf->a_block_start->time;
+ //}//else if(unlikely(evId == TRACE_BLOCK_END)){
+ //tf->count = 0;
+ //tf->pre_cycle_count = 0;
+ //tf->cur_cycle_count = tf->a_block_end->cycle_count;
+ //return tf->a_block_end->time;
+ //}
+
+ // Calculate total time in cycles from start of buffer for this event
+ cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
+ //g_debug("event cycle count %llu", cycle_count);
+ //
+ //gint64 delta_count = (gint64)(cycle_count - tf->pre_cycle_count);
+ //LttCycleCount res_delta_count;
+ gboolean comp_count = cycle_count < tf->pre_cycle_count;
+ tf->pre_cycle_count = cycle_count;
+
+ if(unlikely(comp_count)) {
+ /* Wrapped */
+ tf->overflow_nsec += tf->one_overflow_nsec;
+ tf->count++; //increment overflow count
}
+ //if(unlikely(cycle_count < tf->pre_cycle_count)) tf->count++;
+ //if(unlikely(delta_count < 0)) {
+ // tf->count++; //increment wrap count
+ // keep in mind that delta_count is negative here.
+ // res_delta_count = delta_count + 0x100000000ULL ;
+ //} else
+ // res_delta_count = (LttCycleCount)delta_count;
+
+ //cycle_count += (LttCycleCount)tf->count << 32;
+
+ //FIXME (MD)
+ // if(tf->cur_heart_beat_number > tf->count)
+ // cycle_count += (tf->cur_heart_beat_number - tf->count) << 32;
+
+ //tf->cur_cycle_count = tf->cur_cycle_count + res_delta_count;
+ //
+ //
+ // Total cycle counter of the event.
+ //tf->cur_cycle_count = cycle_count | ((LttCycleCount)tf->count << 32);
+
+ //g_debug("cur cycle count %llu", tf->cur_cycle_count);
+
+ // Total number of cycles since the beginning of the block
+ //lEventTotalCycle = tf->cur_cycle_count
+ // - tf->a_block_start->cycle_count;
+
+
+
+#if 0
// Calculate total time in cycles from start of buffer for this event
cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
- if(cycle_count < tf->pre_cycle_count)tf->count++;
+ if(unlikely(cycle_count < tf->pre_cycle_count)) tf->count++;
tf->pre_cycle_count = cycle_count;
- cycle_count += tmpCycleCount * tf->count;
+ cycle_count += (LttCycleCount)tf->count << 32;
+ //FIXME (MD)
// if(tf->cur_heart_beat_number > tf->count)
- // cycle_count += tmpCycleCount * (tf->cur_heart_beat_number - tf->count);
+ // cycle_count += (tf->cur_heart_beat_number - tf->count) << 32;
tf->cur_cycle_count = cycle_count;
lEventTotalCycle = cycle_count;
lEventTotalCycle -= tf->a_block_start->cycle_count;
-
+#endif //0
// Convert it to nsecs
- lEventNSec = (double)lEventTotalCycle / (double)tf->cycle_per_nsec;
-
+ //lEventNSec = (double)lEventTotalCycle * (double)tf->nsec_per_cycle;
+ //lEventNSec = (tf->cycles_per_nsec_reciprocal * lEventTotalCycle) >> 16;
+
// Determine offset in struct LttTime
- lTimeOffset.tv_nsec = lEventNSec % NANOSECONDS_PER_SECOND;
- lTimeOffset.tv_sec = lEventNSec / NANOSECONDS_PER_SECOND;
-
+ //lTimeOffset = ltt_time_from_double(lEventNSec);
+ //
+ // We do not substract block start cycle count here, it has already been done
+ // on the overflow_nsec
+ // The result should never be negative, because the cycle count of
+ // the event following the block start should be >= the previous one.
+ lEventNSec = (gint64)((double)cycle_count * tf->nsec_per_cycle)
+ +tf->overflow_nsec;
+ //g_assert(lEventNSec >= 0);
+ lTimeOffset = ltt_time_from_uint64(lEventNSec);
+
time = ltt_time_add(tf->a_block_start->time, lTimeOffset);
return time;
LttField * rootFld = evT->root_field;
// rootFld->base_address = evD;
- if(rootFld)
+ if(likely(rootFld))
rootFld->field_size = getFieldtypeSize(tf, evT, 0,0,rootFld, evD,t);
}
int size, size1, element_number, i, offset1, offset2;
LttType * type = fld->field_type;
- if(t){
- if(evT->latest_block==t->which_block && evT->latest_event==t->which_event){
+ if(likely(t)){
+ if(unlikely(evT->latest_block==t->which_block &&
+ evT->latest_event==t->which_event)){
return fld->field_size;
}
}
- if(fld->field_fixed == 1){
+ if(likely(fld->field_fixed == 1)){
if(fld == evT->root_field) return fld->field_size;
}
+ switch(type->type_class) {
+ case LTT_ARRAY:
+ element_number = (int) type->element_number;
+ if(fld->field_fixed == -1){
+ size = getFieldtypeSize(t, evT, offsetRoot,
+ 0,fld->child[0], NULL, trace);
+ if(size == 0){ //has string or sequence
+ fld->field_fixed = 0;
+ }else{
+ fld->field_fixed = 1;
+ size *= element_number;
+ }
+ }else if(fld->field_fixed == 0){// has string or sequence
+ size = 0;
+ for(i=0;i<element_number;i++){
+ size += getFieldtypeSize(t, evT, offsetRoot+size,size,
+ fld->child[0], evD+size, trace);
+ }
+ }else size = fld->field_size;
+ break;
+
+ case LTT_SEQUENCE:
+ size1 = (int) ltt_type_size(trace, type);
+ if(fld->field_fixed == -1){
+ fld->sequ_number_size = size1;
+ fld->field_fixed = 0;
+ size = getFieldtypeSize(t, evT, offsetRoot,
+ 0,fld->child[0], NULL, trace);
+ fld->element_size = size;
+ }else{//0: sequence
+ element_number = getIntNumber(size1,evD);
+ type->element_number = element_number;
+ if(fld->element_size > 0){
+ size = element_number * fld->element_size;
+ }else{//sequence has string or sequence
+ size = 0;
+ for(i=0;i<element_number;i++){
+ size += getFieldtypeSize(t, evT, offsetRoot+size+size1,size+size1,
+ fld->child[0], evD+size+size1, trace);
+ }
+ }
+ size += size1;
+ }
+ break;
+
+ case LTT_STRING:
+ size = 0;
+ if(fld->field_fixed == -1){
+ fld->field_fixed = 0;
+ }else{//0: string
+ size = strlen((char*)evD) + 1; //include end : '\0'
+ }
+ break;
+
+ case LTT_STRUCT:
+ element_number = (int) type->element_number;
+ size = 0;
+ if(fld->field_fixed == -1){
+ offset1 = offsetRoot;
+ offset2 = 0;
+ for(i=0;i<element_number;i++){
+ size1=getFieldtypeSize(t, evT,offset1,offset2,
+ fld->child[i], NULL, trace);
+ if(size1 > 0 && size >= 0){
+ size += size1;
+ if(offset1 >= 0) offset1 += size1;
+ offset2 += size1;
+ }else{
+ size = -1;
+ offset1 = -1;
+ offset2 = -1;
+ }
+ }
+ if(size == -1){
+ fld->field_fixed = 0;
+ size = 0;
+ }else fld->field_fixed = 1;
+ }else if(fld->field_fixed == 0){
+ offset1 = offsetRoot;
+ offset2 = 0;
+ for(i=0;i<element_number;i++){
+ size=getFieldtypeSize(t,evT,offset1,offset2,
+ fld->child[i],evD+offset2, trace);
+ offset1 += size;
+ offset2 += size;
+ }
+ size = offset2;
+ }else size = fld->field_size;
+ break;
+
+ default:
+ if(fld->field_fixed == -1){
+ size = (int) ltt_type_size(trace, type);
+ fld->field_fixed = 1;
+ }else size = fld->field_size;
+ break;
+ }
+
+
+
+#if 0
if(type->type_class != LTT_STRUCT && type->type_class != LTT_ARRAY &&
type->type_class != LTT_SEQUENCE && type->type_class != LTT_STRING){
if(fld->field_fixed == -1){
size = offset2;
}else size = fld->field_size;
}
+#endif //0
fld->offset_root = offsetRoot;
fld->offset_parent = offsetParent;
 * size : the size of the integer
 * evD : the event data
 *Return value
-  * int : an integer
+  * gint64 : a 64 bits integer
 ****************************************************************************/
-int getIntNumber(int size, void *evD)
+gint64 getIntNumber(int size, void *evD)
{
gint64 i;
+
+  /* Sign-extend the raw event payload into a 64 bit integer, according to
+   * the declared field size in bytes. */
+  switch(size) {
+    case 1: i = *(gint8 *)evD; break;
+    case 2: i = *(gint16 *)evD; break;
+    case 4: i = *(gint32 *)evD; break;
+    case 8: i = *(gint64 *)evD; break;
+    /* NOTE(review): the default case still dereferences 8 bytes from evD
+     * before complaining ; for a truly unknown size this may over-read the
+     * event payload — confirm callers only ever pass 1, 2, 4 or 8. */
+    default: i = *(gint64 *)evD;
+             g_critical("getIntNumber : integer size %d unknown", size);
+             break;
+  }
+
+#if 0
if(size == 1) i = *(gint8 *)evD;
else if(size == 2) i = *(gint16 *)evD;
else if(size == 4) i = *(gint32 *)evD;
else if(size == 8) i = *(gint64 *)evD;
-
-  return (int) i;
+#endif //0
+
+  return (gint64)i;
}
/*****************************************************************************