X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=share%2Fkernelcompat.h;h=f95d0d77e886d17090b41c8be0c556935e2df399;hb=09938485689f3482ec845e52d5bf5e78c1093e27;hp=5bd8e87503f22ecf30b3d2d170f77c5e3e15b13b;hpb=b6bf28ecd4c07e7865d340f1600a35d6edc05ec8;p=lttng-ust.git

diff --git a/share/kernelcompat.h b/share/kernelcompat.h
index 5bd8e875..f95d0d77 100644
--- a/share/kernelcompat.h
+++ b/share/kernelcompat.h
@@ -14,6 +14,7 @@
 #define KERN_INFO ""
 #define KERN_ERR ""
 #define KERN_ALERT ""
+#define KERN_WARNING ""
 
 
 /* ERROR OPS */
@@ -62,20 +63,34 @@ typedef uint64_t u64;
 
 #include <pthread.h>
 
 #define DEFINE_MUTEX(m) pthread_mutex_t (m) = PTHREAD_MUTEX_INITIALIZER;
+#define DECLARE_MUTEX(m) extern pthread_mutex_t (m);
 
 #define mutex_lock(m) pthread_mutex_lock(m)
 #define mutex_unlock(m) pthread_mutex_unlock(m)
 
+/* SPINLOCKS */
+
+typedef int spinlock_t;
+
+#define spin_lock(a) /* nothing */
+#define spin_unlock(a) /* nothing */
+#define spin_lock_init(a) /* nothing */
+
+
 /* MALLOCATION */
 
 #include <stdlib.h>
 
 #define kmalloc(s, t) malloc(s)
-#define kzalloc(s, t) malloc(s)
+#define kzalloc(s, t) zmalloc(s)
 #define kfree(p) free((void *)p)
 #define kstrdup(s, t) strdup(s)
 
+#define zmalloc(s) calloc(1, s)
+
+#define GFP_KERNEL
+
 /* PRINTK */
 
 #include <stdio.h>
@@ -83,19 +98,21 @@ typedef uint64_t u64;
 
 /* MEMORY BARRIERS */
 
-#define smp_rmb() do {} while(0)
-#define smp_wmb() do {} while(0)
-#define smp_mb() do {} while(0)
+//#define smp_rmb() do {} while(0)
+//#define smp_wmb() do {} while(0)
+//#define smp_mb() do {} while(0)
 #define smp_mb__after_atomic_inc() do {} while(0)
 
 #define read_barrier_depends() do {} while(0)
-#define smp_read_barrier_depends() do {} while(0)
+//#define smp_read_barrier_depends() do {} while(0)
 
 /* RCU */
 
-#define rcu_assign_pointer(a, b) do {} while(0)
-#define call_rcu_sched(a,b) do {} while(0)
-#define rcu_barrier_sched() do {} while(0)
+#include "urcu.h"
+#define call_rcu_sched(a,b) b(a); synchronize_rcu()
+#define rcu_barrier_sched() do {} while(0) /* this nop is ok if call_rcu_sched does a synchronize_rcu() */
+#define rcu_read_lock_sched_notrace() rcu_read_lock()
+#define rcu_read_unlock_sched_notrace() rcu_read_unlock()
 
 /* ATOMICITY */
 
@@ -124,9 +141,97 @@ static int atomic_read(atomic_t *p)
 	return p->counter;
 }
 
-/* CACHE */
+#define atomic_long_t atomic_t
+#define atomic_long_set atomic_set
+#define atomic_long_read atomic_read
+
+#include "asm.h"
+
+//#define __xg(x) ((volatile long *)(x))
+
+#define cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
+				       (unsigned long)(n), sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		asm volatile("lock; cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 2:
+		asm volatile("lock; cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 4:
+		asm volatile("lock; cmpxchgl %k1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 8:
+		asm volatile("lock; cmpxchgq %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+//#define local_cmpxchg cmpxchg
+#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))
+
+#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+
+/* LOCAL OPS */
+
+//typedef int local_t;
+typedef struct
+{
+	atomic_long_t a;
+} local_t;
+
+
+static inline void local_inc(local_t *l)
+{
+	(l->a.counter)++;
+}
+
+static inline void local_set(local_t *l, int v)
+{
+	l->a.counter = v;
+}
+
+static inline void local_add(int v, local_t *l)
+{
+	l->a.counter += v;
+}
+
+static int local_add_return(int v, local_t *l)
+{
+	return l->a.counter += v;
+}
+
+static inline int local_read(local_t *l)
+{
+	return l->a.counter;
+}
+
+
+/* ATTRIBUTES */
 
 #define ____cacheline_aligned
+#define __init
+#define __exit
 
 /* MATH */
 
@@ -166,25 +271,69 @@ static __inline__ int get_count_order(unsigned int count)
 }
 
 
+
+
+#include <unistd.h>
+
+#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
+#define PAGE_MASK (PAGE_SIZE-1)
+
+
+
+
 /* ARRAYS */
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 
 /* TRACE CLOCK */
 
+//ust// static inline u64 trace_clock_read64(void)
+//ust// {
+//ust// 	uint32_t low;
+//ust// 	uint32_t high;
+//ust// 	uint64_t retval;
+//ust// 	__asm__ volatile ("rdtsc\n" : "=a" (low), "=d" (high));
+//ust//
+//ust// 	retval = high;
+//ust// 	retval <<= 32;
+//ust// 	return retval | low;
+//ust// }
+
 static inline u64 trace_clock_read64(void)
 {
-	return 0LL;
+	struct timeval tv;
+	u64 retval;
+
+	gettimeofday(&tv, NULL);
+	retval = tv.tv_sec;
+	retval *= 1000000;
+	retval += tv.tv_usec;
+
+	return retval;
 }
 
-static inline unsigned int trace_clock_frequency(void)
+static inline u64 trace_clock_frequency(void)
 {
-	return 0LL;
+	return 1000000LL;
 }
 
 static inline u32 trace_clock_freq_scale(void)
 {
-	return 0;
+	return 1;
 }
 
+
+/* LISTS */
+
+#define list_add_rcu list_add
+#define list_for_each_entry_rcu list_for_each_entry
+
+
+#define EXPORT_SYMBOL_GPL(a) /*nothing*/
+
+#define smp_processor_id() (-1)
+
 #endif /* KERNELCOMPAT_H */
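
For illustration, a minimal sketch of how kernel-style code builds in user space against this compatibility layer. Everything below is hypothetical and not part of the patch: the file name compat_demo.c, the mutex name demo_mutex, and the build line are assumptions, and it presumes the header is reachable as "kernelcompat.h" with the "urcu.h" and "asm.h" headers it pulls in on the include path.

/*
 * compat_demo.c -- hypothetical usage sketch, not part of the patch above.
 * Assumed build: gcc compat_demo.c -lpthread -lurcu
 */
#include <stdio.h>
#include "kernelcompat.h"

DEFINE_MUTEX(demo_mutex)	/* expands to a pthread mutex; the ';' is in the macro */

int main(void)
{
	char *buf;
	u64 t0, t1;

	mutex_lock(&demo_mutex);	/* pthread_mutex_lock() underneath */

	buf = kzalloc(16, GFP_KERNEL);	/* GFP flags vanish; calloc(1, 16) underneath */

	t0 = trace_clock_read64();	/* gettimeofday()-based clock, microsecond units */
	snprintf(buf, 16, "hello");
	t1 = trace_clock_read64();

	printf("%s: %llu us elapsed\n", buf, (unsigned long long)(t1 - t0));

	kfree(buf);			/* free() underneath */
	mutex_unlock(&demo_mutex);
	return 0;
}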
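Likewise, a sketch exercising the cmpxchg()/local_t emulation from the patch. It is x86-only, since __cmpxchg() is implemented with lock; cmpxchg inline assembly, and the function name demo_local_inc_cmpxchg is made up for the example.

/* Hypothetical example: an increment built on the local_cmpxchg() shim. */
#include "kernelcompat.h"

static void demo_local_inc_cmpxchg(local_t *l)
{
	int old, seen;

	/*
	 * Classic compare-and-swap retry loop: reread and retry until no
	 * other thread changed the counter between the read and the swap.
	 */
	do {
		old = local_read(l);
		seen = local_cmpxchg(l, old, old + 1);	/* returns the prior value */
	} while (seen != old);
}

If local_cmpxchg() returns a value different from old, the swap did not happen and the loop simply retries with the fresh value; that is the whole synchronization story, with the lock prefix supplying atomicity.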