X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=ltt%2Fbranches%2Fpoly%2Fltt%2Ftime.h;h=fe7641dfbbf49fdcc871e37ac3d55e1994a36b40;hb=1d1df11d0582bef07ef2b33e8e82a75b08f32be4;hp=ec4d134baf97d051ae2d3d7b87c29f837a5943b4;hpb=f31675494449c73316cab60617f51a85e67291c1;p=lttv.git

diff --git a/ltt/branches/poly/ltt/time.h b/ltt/branches/poly/ltt/time.h
index ec4d134b..fe7641df 100644
--- a/ltt/branches/poly/ltt/time.h
+++ b/ltt/branches/poly/ltt/time.h
@@ -20,7 +20,7 @@
 #define LTT_TIME_H
 
 #include 
-
+#include 
 
 typedef struct _LttTime {
   unsigned long tv_sec;
@@ -29,6 +29,7 @@ typedef struct _LttTime {
 
 #define NANOSECONDS_PER_SECOND 1000000000
 
+#define SHIFT_CONST 1.07374182400631629848
 
 static const LttTime ltt_time_zero = { 0, 0 };
 
@@ -41,7 +42,11 @@ static inline LttTime ltt_time_sub(LttTime t1, LttTime t2)
   LttTime res;
   res.tv_sec = t1.tv_sec - t2.tv_sec;
   res.tv_nsec = t1.tv_nsec - t2.tv_nsec;
-  if(t1.tv_nsec < t2.tv_nsec) {
+  /* unlikely : given equal chance to be anywhere in t1.tv_nsec, and
+   * higher probability of low value for t2.tv_sec, we will habitually
+   * not wrap.
+   */
+  if(unlikely(t1.tv_nsec < t2.tv_nsec)) {
     res.tv_sec--;
     res.tv_nsec += NANOSECONDS_PER_SECOND;
   }
@@ -54,21 +59,27 @@ static inline LttTime ltt_time_add(LttTime t1, LttTime t2)
   LttTime res;
   res.tv_nsec = t1.tv_nsec + t2.tv_nsec;
   res.tv_sec = t1.tv_sec + t2.tv_sec;
-  if(res.tv_nsec >= NANOSECONDS_PER_SECOND) {
+  /* unlikely : given equal chance to be anywhere in t1.tv_nsec, and
+   * higher probability of low value for t2.tv_sec, we will habitually
+   * not wrap.
+   */
+  if(unlikely(res.tv_nsec >= NANOSECONDS_PER_SECOND)) {
     res.tv_sec++;
     res.tv_nsec -= NANOSECONDS_PER_SECOND;
   }
   return res;
 }
 
-
+/* Fastest comparison : t1 > t2 */
 static inline int ltt_time_compare(LttTime t1, LttTime t2)
 {
-  if(t1.tv_sec > t2.tv_sec) return 1;
-  if(t1.tv_sec < t2.tv_sec) return -1;
-  if(t1.tv_nsec > t2.tv_nsec) return 1;
-  if(t1.tv_nsec < t2.tv_nsec) return -1;
-  return 0;
+  int ret=0;
+  if(likely(t1.tv_sec > t2.tv_sec)) ret = 1;
+  else if(unlikely(t1.tv_sec < t2.tv_sec)) ret = -1;
+  else if(likely(t1.tv_nsec > t2.tv_nsec)) ret = 1;
+  else if(unlikely(t1.tv_nsec < t2.tv_nsec)) ret = -1;
+
+  return ret;
 }
 
 #define LTT_TIME_MIN(a,b) ((ltt_time_compare((a),(b)) < 0) ? (a) : (b))
@@ -85,10 +96,12 @@ static inline double ltt_time_to_double(LttTime t1)
    *
    * So we have 53-30 = 23 bits left for tv_sec.
    * */
+#ifdef EXTRA_CHECK
   g_assert(t1.tv_sec <= MAX_TV_SEC_TO_DOUBLE);
   if(t1.tv_sec > MAX_TV_SEC_TO_DOUBLE)
     g_warning("Precision loss in conversion LttTime to double");
-  return (double)t1.tv_sec + (double)t1.tv_nsec / NANOSECONDS_PER_SECOND;
+#endif //EXTRA_CHECK
+  return ((double)t1.tv_sec * (double)NANOSECONDS_PER_SECOND) + (double)t1.tv_nsec;
 }
 
 
@@ -102,12 +115,15 @@ static inline LttTime ltt_time_from_double(double t1)
   *
   * So we have 53-30 = 23 bits left for tv_sec.
   * */
+#ifdef EXTRA_CHECK
   g_assert(t1 <= MAX_TV_SEC_TO_DOUBLE);
   if(t1 > MAX_TV_SEC_TO_DOUBLE)
     g_warning("Conversion from non precise double to LttTime");
+#endif //EXTRA_CHECK
   LttTime res;
-  res.tv_sec = t1;
-  res.tv_nsec = (t1 - res.tv_sec) * NANOSECONDS_PER_SECOND;
+  //res.tv_sec = t1/(double)NANOSECONDS_PER_SECOND;
+  res.tv_sec = (guint64)(t1 * SHIFT_CONST) >> 30;
+  res.tv_nsec = (t1 - (res.tv_sec*NANOSECONDS_PER_SECOND));
   return res;
 }
 
@@ -188,9 +204,11 @@ static inline LttTime ltt_time_from_uint64(guint64 t1)
 {
   /* We lose precision if tv_sec is > than (2^62)-1
    * */
+#ifdef EXTRA_CHECK
   g_assert(t1 <= MAX_TV_SEC_TO_UINT64);
   if(t1 > MAX_TV_SEC_TO_UINT64)
     g_warning("Conversion from non precise uint64 to LttTime");
+#endif //EXTRA_CHECK
   LttTime res;
   res.tv_sec = t1/NANOSECONDS_PER_SECOND;
   res.tv_nsec = (t1 - res.tv_sec*NANOSECONDS_PER_SECOND);
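
The hunks above wrap the carry/borrow branches of ltt_time_add() and ltt_time_sub() in unlikely(), and rewrite ltt_time_compare() around likely()/unlikely(), so the compiler can lay out the common no-wrap path as the straight-line fall-through. The bare "#include" lines in the first hunk presumably pull in <glib.h> and a compiler-hints header; that is an assumption, not recoverable from this page. Below is a minimal, self-contained sketch of the idea, assuming a GCC-compatible compiler where such hints are typically built on __builtin_expect; the macro definitions and the main() harness are illustrative, not lttv's actual headers.

#include <stdio.h>

#define NANOSECONDS_PER_SECOND 1000000000

#ifdef __GNUC__
#define likely(x)   __builtin_expect(!!(x), 1)  /* hint: condition is usually true */
#define unlikely(x) __builtin_expect(!!(x), 0)  /* hint: condition is usually false */
#else
#define likely(x)   (x)                         /* fall back to no hint */
#define unlikely(x) (x)
#endif

typedef struct _LttTime {
  unsigned long tv_sec;
  unsigned long tv_nsec;
} LttTime;

/* Same shape as the patched ltt_time_add(): the nanosecond carry is
 * marked unlikely, so the no-carry case becomes the fall-through path. */
static inline LttTime ltt_time_add(LttTime t1, LttTime t2)
{
  LttTime res;
  res.tv_nsec = t1.tv_nsec + t2.tv_nsec;
  res.tv_sec = t1.tv_sec + t2.tv_sec;
  if(unlikely(res.tv_nsec >= NANOSECONDS_PER_SECOND)) {
    res.tv_sec++;
    res.tv_nsec -= NANOSECONDS_PER_SECOND;
  }
  return res;
}

int main(void)
{
  LttTime a = { 1, 900000000 };
  LttTime b = { 0, 200000000 };
  LttTime c = ltt_time_add(a, b);      /* carry path: 2 s, 100000000 ns */
  printf("%lu.%09lu\n", c.tv_sec, c.tv_nsec);
  return 0;
}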
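
The other change replaces a division in ltt_time_from_double() with a multiplication by SHIFT_CONST followed by a 30-bit right shift. SHIFT_CONST (1.07374182400631629848) is approximately 2^30 / 10^9 = 1.073741824, so (guint64)(t1 * SHIFT_CONST) >> 30 approximates t1 / NANOSECONDS_PER_SECOND; the extra trailing digits look like a small upward correction so the truncating shift does not fall just below an exact second boundary (an inference, not stated in the diff). This pairs with the new ltt_time_to_double(), which now returns a nanosecond count (tv_sec * 1e9 + tv_nsec) rather than seconds. A small self-contained check of the arithmetic follows; the main() harness, variable names and sample values are illustrative, not from lttv.

#include <stdio.h>
#include <stdint.h>

#define NANOSECONDS_PER_SECOND 1000000000
#define SHIFT_CONST 1.07374182400631629848   /* ~ 2^30 / 10^9 */

int main(void)
{
  /* 1234567 s + 987654321 ns, expressed in nanoseconds the way the
   * patched ltt_time_to_double() does it: tv_sec * 1e9 + tv_nsec.
   * (Sample values chosen to stay well inside double precision.) */
  double t1 = 1234567.0 * NANOSECONDS_PER_SECOND + 987654321.0;

  /* Patched ltt_time_from_double(): division by 10^9 replaced by a
   * multiply and a 30-bit right shift. */
  uint64_t tv_sec_shift = (uint64_t)(t1 * SHIFT_CONST) >> 30;
  /* Reference result using a plain division. */
  uint64_t tv_sec_div = (uint64_t)(t1 / NANOSECONDS_PER_SECOND);
  /* Remaining nanoseconds, as in the patched code. */
  uint64_t tv_nsec = (uint64_t)(t1 - (double)tv_sec_shift * NANOSECONDS_PER_SECOND);

  /* Both conversions should report 1234567 s, and 987654321 ns remain. */
  printf("shift: %llu s  div: %llu s  nsec: %llu ns\n",
         (unsigned long long)tv_sec_shift,
         (unsigned long long)tv_sec_div,
         (unsigned long long)tv_nsec);
  return 0;
}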