X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=share%2Fkernelcompat.h;h=b442718e2af19a7e611aa36d03f04118834f6ab8;hb=c1dea0b3d1312d0e3747da93eb949145c487eeba;hp=6bc09caea232bc6f1257ad196380fd445ce5cad1;hpb=5f54827b88b093974e4bf58f67490036718644c7;p=ust.git

diff --git a/share/kernelcompat.h b/share/kernelcompat.h
index 6bc09ca..b442718 100644
--- a/share/kernelcompat.h
+++ b/share/kernelcompat.h
@@ -14,6 +14,7 @@
 #define KERN_INFO ""
 #define KERN_ERR ""
 #define KERN_ALERT ""
+#define KERN_WARNING ""
 
 /* ERROR OPS */
 
@@ -62,20 +63,34 @@ typedef uint64_t u64;
 #include <pthread.h>
 
 #define DEFINE_MUTEX(m) pthread_mutex_t (m) = PTHREAD_MUTEX_INITIALIZER;
+#define DECLARE_MUTEX(m) extern pthread_mutex_t (m);
 
 #define mutex_lock(m) pthread_mutex_lock(m)
 
 #define mutex_unlock(m) pthread_mutex_unlock(m)
 
+/* SPINLOCKS */
+
+typedef int spinlock_t;
+
+#define spin_lock(a) /* nothing */
+#define spin_unlock(a) /* nothing */
+#define spin_lock_init(a) /* nothing */
+
+
 /* MALLOCATION */
 
 #include <stdlib.h>
 
 #define kmalloc(s, t) malloc(s)
-#define kzalloc(s, t) malloc(s)
+#define kzalloc(s, t) zmalloc(s)
 #define kfree(p) free((void *)p)
 #define kstrdup(s, t) strdup(s)
 
+#define zmalloc(s) calloc(1, s)
+
+#define GFP_KERNEL
+
 /* PRINTK */
 
 #include <stdio.h>
@@ -96,6 +111,8 @@ typedef uint64_t u64;
 #define rcu_assign_pointer(a, b) do {} while(0)
 #define call_rcu_sched(a,b) do {} while(0)
 #define rcu_barrier_sched() do {} while(0)
+#define rcu_read_lock_sched_notrace() do {} while (0)
+#define rcu_read_unlock_sched_notrace() do {} while (0)
 
 /* ATOMICITY */
 
@@ -124,8 +141,90 @@ static int atomic_read(atomic_t *p)
 	return p->counter;
 }
 
+#define atomic_long_t atomic_t
+#define atomic_long_set atomic_set
+#define atomic_long_read atomic_read
+
+#include "asm.h"
+
+#define __xg(x) ((volatile long *)(x))
+
+#define cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
+				       (unsigned long)(n), sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		asm volatile("lock cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 2:
+		asm volatile("lock cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 4:
+		asm volatile("lock cmpxchgl %k1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 8:
+		asm volatile("lock cmpxchgq %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+//#define local_cmpxchg cmpxchg
+#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))
+
+#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+
 /* LOCAL OPS */
 
+//typedef int local_t;
+typedef struct
+{
+	atomic_long_t a;
+} local_t;
+
+
+static inline void local_inc(local_t *l)
+{
+	(l->a.counter)++;
+}
+
+static inline void local_set(local_t *l, int v)
+{
+	l->a.counter = v;
+}
+
+static inline void local_add(int v, local_t *l)
+{
+	l->a.counter += v;
+}
+
+static inline int local_add_return(int v, local_t *l)
+{
+	return l->a.counter += v;
+}
+
+static inline int local_read(local_t *l)
+{
+	return l->a.counter;
+}
 
 /* ATTRIBUTES */
 
@@ -180,6 +279,7 @@ static __inline__ int get_count_order(unsigned int count)
 #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 #define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
+#define PAGE_MASK (PAGE_SIZE-1)
 
 
@@ -205,4 +305,15 @@ static inline u32 trace_clock_freq_scale(void)
 	return 0;
 }
 
+
+/* LISTS */
+
+#define list_add_rcu list_add
+#define list_for_each_entry_rcu list_for_each_entry
+
+
+#define EXPORT_SYMBOL_GPL(a) /* nothing */
+
+#define smp_processor_id() (-1)
+
 #endif /* KERNELCOMPAT_H */
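
A quick way to convince yourself that the size-dispatched cmpxchg shim introduced by this patch behaves like the kernel primitive is to exercise it from a standalone program. The sketch below is illustrative only and is not part of ust.git: it assumes an x86-64 build with GCC or Clang, copies just the 4- and 8-byte cases of __cmpxchg from the patch, and checks both the successful and the failed swap paths.

/* Illustrative harness, not part of ust.git; assumes x86-64 + GCC/Clang. */
#include <assert.h>
#include <stdio.h>

#define __xg(x) ((volatile long *)(x))

/* Mirrors the 4- and 8-byte cases of __cmpxchg from the patch above. */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 4:
		asm volatile("lock cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile("lock cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))

int main(void)
{
	long counter = 5;

	/* Successful swap: *ptr equals old, so the new value is stored
	 * and the previous value (5) is returned. */
	long prev = cmpxchg(&counter, 5, 9);
	assert(prev == 5 && counter == 9);

	/* Failed swap: *ptr no longer equals old, so memory is left
	 * untouched and the current value (9) is returned instead. */
	prev = cmpxchg(&counter, 5, 42);
	assert(prev == 9 && counter == 9);

	printf("cmpxchg shim behaves as expected\n");
	return 0;
}

Dispatching on sizeof(*(ptr)) inside the macro is what lets the single cmpxchg() name cover char-, short-, int- and long-sized operands, matching the kernel API that the ported tracing code expects; local_cmpxchg() and atomic_long_cmpxchg() in the patch are thin wrappers that point the same machinery at a local_t or atomic_t counter field.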