X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=share%2Fkernelcompat.h;h=41746d9f05d21b3ea18e8c30e2e5c9ef69fec1ba;hb=16a93583f647cb6e5da1ed395462200aaf07bb2d;hp=f95d0d77e886d17090b41c8be0c556935e2df399;hpb=09938485689f3482ec845e52d5bf5e78c1093e27;p=lttng-ust.git

diff --git a/share/kernelcompat.h b/share/kernelcompat.h
index f95d0d77..41746d9f 100644
--- a/share/kernelcompat.h
+++ b/share/kernelcompat.h
@@ -1,9 +1,12 @@
 #ifndef KERNELCOMPAT_H
 #define KERNELCOMPAT_H
 
+#include
+
 #include "compiler.h"
 
 #include
+#include
 
 #define container_of(ptr, type, member) ({ \
 	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
@@ -40,12 +43,12 @@ static inline long IS_ERR(const void *ptr)
 
 /* FIXED SIZE INTEGERS */
 
-#include
+//#include
 
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
+//typedef uint8_t u8;
+//typedef uint16_t u16;
+//typedef uint32_t u32;
+//typedef uint64_t u64;
 
 #define min_t(type, x, y) ({ \
 	type __min1 = (x); \
@@ -69,14 +72,6 @@ typedef uint64_t u64;
 
 #define mutex_unlock(m) pthread_mutex_unlock(m)
 
-/* SPINLOCKS */
-
-typedef int spinlock_t;
-
-#define spin_lock(a) /* nothing */
-#define spin_unlock(a) /* nothing */
-#define spin_lock_init(a) /* nothing */
-
 /* MALLOCATION */
 
 
@@ -98,133 +93,90 @@ typedef int spinlock_t;
 
 /* MEMORY BARRIERS */
 
-//#define smp_rmb() do {} while(0)
-//#define smp_wmb() do {} while(0)
-//#define smp_mb() do {} while(0)
-#define smp_mb__after_atomic_inc() do {} while(0)
-
-#define read_barrier_depends() do {} while(0)
-//#define smp_read_barrier_depends() do {} while(0)
-
-/* RCU */
+//#define smp_mb__after_atomic_inc() do {} while(0)
 
-#include "urcu.h"
-#define call_rcu_sched(a,b) b(a); synchronize_rcu()
-#define rcu_barrier_sched() do {} while(0) /* this nop is ok if call_rcu_sched does a synchronize_rcu() */
-#define rcu_read_lock_sched_notrace() rcu_read_lock()
-#define rcu_read_unlock_sched_notrace() rcu_read_unlock()
+///* RCU */
+//
+//#include "urcu.h"
+//#define call_rcu_sched(a,b) b(a); synchronize_rcu()
+//#define rcu_barrier_sched() do {} while(0) /* this nop is ok if call_rcu_sched does a synchronize_rcu() */
+//#define rcu_read_lock_sched_notrace() rcu_read_lock()
+//#define rcu_read_unlock_sched_notrace() rcu_read_unlock()
 
 
 /* ATOMICITY */
 
-#include
-
-typedef struct { sig_atomic_t counter; } atomic_t;
-
-static inline int atomic_dec_and_test(atomic_t *p)
-{
-	(p->counter)--;
-	return !p->counter;
-}
-
-static inline void atomic_set(atomic_t *p, int v)
-{
-	p->counter=v;
-}
-
-static inline void atomic_inc(atomic_t *p)
-{
-	p->counter++;
-}
-
-static int atomic_read(atomic_t *p)
-{
-	return p->counter;
-}
-
-#define atomic_long_t atomic_t
-#define atomic_long_set atomic_set
-#define atomic_long_read atomic_read
-
-#include "asm.h"
+//#include
+//
+//static inline int atomic_dec_and_test(atomic_t *p)
+//{
+//	(p->counter)--;
+//	return !p->counter;
+//}
+//
+//static inline void atomic_set(atomic_t *p, int v)
+//{
+//	p->counter=v;
+//}
+//
+//static inline void atomic_inc(atomic_t *p)
+//{
+//	p->counter++;
+//}
+//
+//static int atomic_read(atomic_t *p)
+//{
+//	return p->counter;
+//}
+//
+//#define atomic_long_t atomic_t
+//#define atomic_long_set atomic_set
+//#define atomic_long_read atomic_read
 
 //#define __xg(x) ((volatile long *)(x))
 
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+//#define cmpxchg(ptr, o, n) \
+//	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
 				       (unsigned long)(n), sizeof(*(ptr))))
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		asm volatile("lock; cmpxchgb %b1,%2"
-			     : "=a"(prev)
-			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 2:
-		asm volatile("lock; cmpxchgw %w1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 4:
-		asm volatile("lock; cmpxchgl %k1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	case 8:
-		asm volatile("lock; cmpxchgq %1,%2"
-			     : "=a"(prev)
-			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-			     : "memory");
-		return prev;
-	}
-	return old;
-}
-
 //#define local_cmpxchg cmpxchg
-#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))
+//#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))
 
-#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+//#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
 
 
 /* LOCAL OPS */
 
 //typedef int local_t;
-typedef struct
-{
-	atomic_long_t a;
-} local_t;
-
-
-static inline void local_inc(local_t *l)
-{
-	(l->a.counter)++;
-}
-
-static inline void local_set(local_t *l, int v)
-{
-	l->a.counter = v;
-}
-
-static inline void local_add(int v, local_t *l)
-{
-	l->a.counter += v;
-}
-
-static int local_add_return(int v, local_t *l)
-{
-	return l->a.counter += v;
-}
-
-static inline int local_read(local_t *l)
-{
-	return l->a.counter;
-}
+//typedef struct
+//{
+//	atomic_long_t a;
+//} local_t;
+//
+//
+//static inline void local_inc(local_t *l)
+//{
+//	(l->a.counter)++;
+//}
+//
+//static inline void local_set(local_t *l, int v)
+//{
+//	l->a.counter = v;
+//}
+//
+//static inline void local_add(int v, local_t *l)
+//{
+//	l->a.counter += v;
+//}
+//
+//static int local_add_return(int v, local_t *l)
+//{
+//	return l->a.counter += v;
+//}
+//
+//static inline int local_read(local_t *l)
+//{
+//	return l->a.counter;
+//}
 
 
 /* ATTRIBUTES */
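
Side note, not part of the patch above: the __cmpxchg() helper removed by this change open-codes the x86 "lock; cmpxchg" instruction in inline assembly for each operand size. The sketch below shows a minimal userspace equivalent of that compare-and-swap, assuming a GCC-compatible compiler that provides the __sync builtins; the function name ust_cmpxchg_ulong and the demo main() are made up for illustration and do not exist in kernelcompat.h.

#include <stdio.h>

/*
 * Illustrative stand-in for the removed __cmpxchg() inline-asm helper:
 * the compiler builtin performs an atomic compare-and-swap and returns
 * the value *ptr held before the operation, like the asm version did.
 */
static inline unsigned long
ust_cmpxchg_ulong(unsigned long *ptr, unsigned long oldval, unsigned long newval)
{
	return __sync_val_compare_and_swap(ptr, oldval, newval);
}

int main(void)
{
	unsigned long counter = 5;

	/* Matches: counter was 5, so it becomes 9 and 5 is returned. */
	unsigned long prev1 = ust_cmpxchg_ulong(&counter, 5, 9);

	/* No match: counter is 9, not 7, so it stays 9 and 9 is returned. */
	unsigned long prev2 = ust_cmpxchg_ulong(&counter, 7, 1);

	printf("prev1=%lu prev2=%lu counter=%lu\n", prev1, prev2, counter);
	return 0;
}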