/*****************************************************************************
 * This file holds the code needed by LTT usertrace that comes from the
 * kernel headers.  Since including kernel headers is not recommended in
 * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
 * (i.e. copied/pasted) from the original kernel headers (2.6.17).
 *
 * Martin Bisson, July 2006
 * Mathieu Desnoyers, August 2006
 *****************************************************************************/
13 #ifndef _KERNELUTILS_I386_H
14 #define _KERNELUTILS_I386_H
20 // We are careful, so we assume a possibly SMP machine
21 #define LOCK "lock ; "
22 #define LOCK_PREFIX "lock ; "
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	/* "lock ;" makes the read-modify-write atomic on SMP. */
	__asm__ __volatile__(
		"lock ; addl %1,%0"
		: "=m" (v->counter)
		: "ir" (i), "m" (v->counter));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		"lock ; incl %0"
		: "=m" (v->counter)
		: "m" (v->counter));
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	/* xaddl atomically exchanges %0 with the memory operand while
	 * adding them; afterwards i holds the OLD value of v->counter. */
	__asm__ __volatile__(
		"lock ; xaddl %0, %1"
		: "=r" (i)
		: "m" (v->counter), "0" (i));
	/* old counter value + amount added == new counter value */
	return i + __i;
}
95 struct __xchg_dummy
{ unsigned long a
[100]; };
96 #define __xg(x) ((struct __xchg_dummy *)(x))
100 * Atomic compare and exchange. Compare OLD with MEM, if identical,
101 * store NEW in MEM. Return the initial value in MEM. Success is
102 * indicated by comparing RETURN with OLD.
105 #define __HAVE_ARCH_CMPXCHG 1
106 #define cmpxchg(ptr,o,n)\
107 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
108 (unsigned long)(n),sizeof(*(ptr))))
110 static inline unsigned long __cmpxchg(volatile void *ptr
, unsigned long old
,
111 unsigned long newval
, int size
)
116 __asm__
__volatile__(LOCK_PREFIX
"cmpxchgb %b1,%2"
118 : "q"(newval
), "m"(*__xg(ptr
)), "0"(old
)
122 __asm__
__volatile__(LOCK_PREFIX
"cmpxchgw %w1,%2"
124 : "r"(newval
), "m"(*__xg(ptr
)), "0"(old
)
128 __asm__
__volatile__(LOCK_PREFIX
"cmpxchgl %1,%2"
130 : "r"(newval
), "m"(*__xg(ptr
)), "0"(old
)
/*
 * rdtscll - read the 64-bit timestamp counter into @val.
 * The "=A" constraint names the edx:eax register pair, exactly where
 * rdtsc leaves its result on i386.
 * NOTE(review): "=A" does not split a 64-bit value across edx:eax on
 * x86-64 — this header is for 32-bit builds only; confirm before reuse.
 */
#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))

/* Cycle counts are 64-bit even on 32-bit x86. */
typedef unsigned long long cycles_t;

/**
 * get_cycles - return the current CPU timestamp counter value.
 */
static inline cycles_t get_cycles(void)
{
	unsigned long long ret;

	rdtscll(ret);
	return ret;
}
156 } /* end of extern "C" */
159 #endif // _KERNELUTILS_I386_H