/*****************************************************************************
 * This file holds the code needed by LTT usertrace that comes from the
 * kernel headers. Since including kernel headers is not recommended in
 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
 * (i.e. copied/pasted) from the original kernel headers (2.6.17).
 *
 * Martin Bisson, July 2006
 */
#ifndef _KERNELUTILS_X86_64_H
#define _KERNELUTILS_X86_64_H

#ifdef __cplusplus
extern "C" {
#endif
// We are careful, so we assume a possibly SMP machine
#define LOCK "lock ; "
#define LOCK_PREFIX "lock ; "
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)
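/*
 * Usage sketch (ours, not part of the original header): an atomic_t is
 * initialized with a plain aggregate initializer and read without any
 * locking. The variable name is illustrative.
 *
 *	static atomic_t hits = { 0 };
 *	int n = atomic_read(&hits);	// volatile load of hits.counter
 */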
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
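/*
 * Usage sketch (ours, names illustrative): adding a batch amount to a
 * shared counter from concurrent threads; the LOCK prefix makes the
 * read-modify-write atomic.
 *
 *	static atomic_t bytes_logged = { 0 };
 *	atomic_add(len, &bytes_logged);
 */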
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
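/*
 * Usage sketch (ours): atomic_inc() is the add-by-one special case,
 * e.g. counting events without a lock.
 *
 *	static atomic_t events = { 0 };
 *	atomic_inc(&events);
 */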
/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	__asm__ __volatile__(
		LOCK "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	return i + __i;
}
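/*
 * Usage sketch (ours): because atomic_add_return() hands back the
 * post-add value, it can allocate unique sequence numbers lock-free.
 *
 *	static atomic_t next_seq = { 0 };
 *	int seq = atomic_add_return(1, &next_seq);	// 1, 2, 3, ...
 */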
#define __xg(x) ((volatile long *)(x))
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
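/*
 * Usage sketch (ours, names illustrative): the classic compare-and-swap
 * retry loop built on cmpxchg(). Per the contract above, the update
 * succeeded only when the returned value equals the old value we read.
 *
 *	static volatile long observed_max = 0;
 *
 *	static __inline__ void update_max(long sample)
 *	{
 *		long old;
 *		do {
 *			old = observed_max;
 *			if (sample <= old)
 *				return;		// already large enough
 *		} while (cmpxchg(&observed_max, old, sample) != old);
 *	}
 */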
#define rdtscll(val) do { \
	unsigned int __a,__d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)
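/*
 * Usage sketch (ours): rdtscll() expands to a statement that writes the
 * 64-bit time-stamp counter into its argument.
 *
 *	unsigned long long tsc;
 *	rdtscll(tsc);
 */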
typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
	unsigned long long ret;

	rdtscll(ret);
	return ret;
}
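/*
 * Usage sketch (ours): timing a region with get_cycles(). The delta is
 * in CPU cycles, not wall-clock units; do_work() is hypothetical.
 *
 *	cycles_t start = get_cycles();
 *	do_work();
 *	cycles_t elapsed = get_cycles() - start;
 */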
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif // _KERNELUTILS_X86_64_H