/*****************************************************************************
 * This file holds the code needed by LTT usertrace that comes from the
 * kernel headers.  Since including kernel headers is not recommended in
 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
 * (i.e. copied/pasted) from the original kernel headers (2.6.17).
 *
 * Martin Bisson, July 2006
 */
12 #ifndef _KERNELUTILS_X86_64_H
13 #define _KERNELUTILS_X86_64_H
15 // We are careful, so we assume a possibly SMP machine
16 #define LOCK "lock ; "
17 #define LOCK_PREFIX "lock ; "
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * (A plain volatile read is atomic for an aligned int on x86-64.)
 */
#define atomic_read(v)		((v)->counter)
42 * atomic_add - add integer to atomic variable
43 * @i: integer value to add
44 * @v: pointer of type atomic_t
46 * Atomically adds @i to @v.
48 static __inline__
void atomic_add(int i
, atomic_t
*v
)
53 :"ir" (i
), "m" (v
->counter
));
57 * atomic_inc - increment atomic variable
58 * @v: pointer of type atomic_t
60 * Atomically increments @v by 1.
62 static __inline__
void atomic_inc(atomic_t
*v
)
71 * atomic_add_return - add and return
72 * @i: integer value to add
73 * @v: pointer of type atomic_t
75 * Atomically adds @i to @v and returns @i + @v
77 static __inline__
int atomic_add_return(int i
, atomic_t
*v
)
83 :"m"(v
->counter
), "0"(i
));
92 #define __xg(x) ((volatile long *)(x))
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
/* Advertise that a real hardware compare-and-exchange is available. */
#define __HAVE_ARCH_CMPXCHG 1

/**
 * __cmpxchg - atomic compare-and-exchange of the @size-byte value at @ptr
 * @ptr:  address of the value to update
 * @old:  expected current value
 * @new:  value to store if *ptr == @old
 * @size: operand width in bytes (1, 2, 4 or 8)
 *
 * Returns the value that was in memory before the operation; the
 * exchange succeeded iff the return value equals @old.  The "lock ; "
 * instruction prefix (same string as LOCK_PREFIX above) makes the
 * operation atomic on SMP.  An unsupported @size leaves memory
 * untouched and returns @old.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock ; cmpxchgb %b1,%2"
				     : "=a" (prev)
				     : "q" (new), "m" (*(volatile long *)ptr), "0" (old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock ; cmpxchgw %w1,%2"
				     : "=a" (prev)
				     : "r" (new), "m" (*(volatile long *)ptr), "0" (old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock ; cmpxchgl %k1,%2"
				     : "=a" (prev)
				     : "r" (new), "m" (*(volatile long *)ptr), "0" (old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__("lock ; cmpxchgq %1,%2"
				     : "=a" (prev)
				     : "r" (new), "m" (*(volatile long *)ptr), "0" (old)
				     : "memory");
		return prev;
	}
	return old;
}
/*
 * cmpxchg - type-generic wrapper around __cmpxchg: atomically stores n
 * into *(ptr) iff *(ptr) == o, returning the previous value cast back
 * to the pointed-to type.  Operand width is deduced from sizeof(*(ptr)).
 */
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
				       (unsigned long)(n),sizeof(*(ptr))))
146 #define rdtscll(val) do { \
147 unsigned int __a,__d; \
148 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
149 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
// Cycle-counter type: the TSC is a 64-bit counter.
typedef unsigned long long cycles_t;

/*
 * get_cycles - return the current value of the CPU timestamp counter.
 */
static inline cycles_t get_cycles (void)
{
	unsigned long long ret;

	rdtscll(ret);
	return ret;
}
#endif // _KERNELUTILS_X86_64_H