| 1 | /***************************************************************************** |
| 2 | * kernelutils-arm.h |
| 3 | * |
| 4 | * This file holds the code needed by LTT usertrace that comes from the |
| 5 | * kernel headers. Since including kernel headers is not recommended in |
| 6 | * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED |
| 7 | * (i.e. copied/pasted) from the original kernel headers (2.6.18). |
| 8 | * |
| 9 | * Do not use these functions within signal handlers, as the architecture offers |
| 10 | * no atomic operations. (Mathieu Desnoyers) It is safe to do multithreaded |
| 11 | * tracing though, as the buffers are per thread. |
| 12 | * |
| 13 | * Deepak Saxena, October 2006 |
| 14 | */ |
| 15 | |
| 16 | #ifndef _KERNELUTILS_ARM_H |
| 17 | #define _KERNELUTILS_ARM_H |
| 18 | |
| 19 | #include <time.h> |
| 20 | |
| 21 | #ifdef __cplusplus |
| 22 | extern "C" { |
| 23 | #endif |
| 24 | |
/*
 * Userspace stand-in for the kernel's atomic_t. "volatile" only forces
 * the compiler to reload the value from memory each time; it provides
 * NO atomicity on this port (see file header: do not use from signal
 * handlers; per-thread buffers make multithreaded tracing safe).
 */
typedef struct { volatile int counter; } atomic_t;

/* Plain load of the counter; not an atomic operation on this port. */
#define atomic_read(v) ((v)->counter)
| 28 | |
| 29 | static inline int atomic_add_return(int i, atomic_t *v) |
| 30 | { |
| 31 | unsigned long flags; |
| 32 | int val; |
| 33 | |
| 34 | val = v->counter; |
| 35 | v->counter = val += i; |
| 36 | |
| 37 | return val; |
| 38 | } |
| 39 | |
/*
 * atomic_add/atomic_inc - add to (or increment) @v, discarding the result.
 * Arguments and expansions are fully parenthesized so the macros behave
 * as single expressions wherever they appear; the underlying operation
 * is NOT atomic on this port (see atomic_add_return).
 */
#define atomic_add(i, v) ((void) atomic_add_return((i), (v)))
#define atomic_inc(v)    ((void) atomic_add_return(1, (v)))
| 42 | |
/*
 * cmpxchg - if *ptr equals @old, store @new_val; return the prior value.
 *
 * NOT atomic (no ldrex/strex, no IRQ masking): a concurrent writer can
 * race between the compare and the store. Acceptable here because the
 * tracing buffers are per-thread (see file header).
 *
 * The third parameter was renamed from "new" to "new_val": this header
 * carries extern "C" guards for C++ inclusion, and "new" is a C++
 * keyword that broke C++ builds. The unused "flags" local was removed.
 */
static inline unsigned long cmpxchg(volatile void *ptr,
		       unsigned long old,
		       unsigned long new_val)
{
	unsigned long prev;
	volatile unsigned long *p = ptr;

	if ((prev = *p) == old)
		*p = new_val;
	return prev;
}
| 54 | |
/*
 * get_cycles - monotonic timestamp in nanoseconds.
 *
 * Emulates a cycle counter with clock_gettime(CLOCK_MONOTONIC). The
 * seconds value is widened to 64 bits BEFORE the multiply: the original
 * "tp.tv_sec * 1000000000" performed the multiplication in time_t/int
 * width, which is signed overflow (undefined behavior) within ~2s of
 * clock start on targets with 32-bit time_t.
 */
static inline unsigned long long get_cycles(void)
{
	struct timespec tp;

	clock_gettime(CLOCK_MONOTONIC, &tp);
	return (unsigned long long) tp.tv_sec * 1000000000ULL
		+ (unsigned long long) tp.tv_nsec;
}
| 61 | |
| 62 | |
| 63 | #ifdef __cplusplus |
| 64 | } /* end of extern "C" */ |
| 65 | #endif |
| 66 | |
| 67 | #endif |