#ifndef KERNELCOMPAT_H
#define KERNELCOMPAT_H

#include "compiler.h"

#include <stddef.h> /* offsetof(), needed by container_of() below */
#include <string.h>
#define container_of(ptr, type, member) ({                      \
        const typeof(((type *)0)->member) *__mptr = (ptr);      \
        (type *)((char *)__mptr - offsetof(type, member)); })
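
/*
 * Usage sketch (struct and member names are hypothetical): given a pointer
 * to a member, container_of() recovers a pointer to the enclosing struct.
 *
 *      struct buffer { int id; struct list_head list; };
 *
 *      struct list_head *pos = ...;
 *      struct buffer *buf = container_of(pos, struct buffer, list);
 */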

/* LOG LEVELS: collapse to empty strings in userspace */

#define KERN_DEBUG ""
#define KERN_NOTICE ""
#define KERN_INFO ""
#define KERN_ERR ""
#define KERN_ALERT ""
#define KERN_WARNING ""

/* ERROR OPS */

#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
        return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}
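
/*
 * Usage sketch (create_buffer() is a hypothetical caller): a function can
 * return ERR_PTR(-ENOMEM) instead of NULL so the caller learns the cause:
 *
 *      void *buf = create_buffer();
 *      if (IS_ERR(buf))
 *              return PTR_ERR(buf);    // e.g. -ENOMEM
 */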


/* FIXED SIZE INTEGERS */

#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#define min_t(type, x, y) ({            \
        type __min1 = (x);              \
        type __min2 = (y);              \
        __min1 < __min2 ? __min1 : __min2; })

#define max_t(type, x, y) ({            \
        type __max1 = (x);              \
        type __max2 = (y);              \
        __max1 > __max2 ? __max1 : __max2; })
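
/*
 * The explicit type parameter sidesteps mixed-sign comparison surprises.
 * With int a = -1 and size_t b = 1, min_t(int, a, b) == -1, whereas a
 * naive ((a) < (b) ? (a) : (b)) promotes -1 to a huge unsigned value and
 * yields 1.
 */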


/* MUTEXES */

#include <pthread.h>

/* No trailing semicolons: callers terminate these like declarations. */
#define DEFINE_MUTEX(m) pthread_mutex_t (m) = PTHREAD_MUTEX_INITIALIZER
#define DECLARE_MUTEX(m) extern pthread_mutex_t (m)

#define mutex_lock(m) pthread_mutex_lock(m)

#define mutex_unlock(m) pthread_mutex_unlock(m)
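
/*
 * Usage sketch (the lock name is hypothetical):
 *
 *      DEFINE_MUTEX(trace_mutex);
 *
 *      mutex_lock(&trace_mutex);
 *      ... critical section ...
 *      mutex_unlock(&trace_mutex);
 */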

/* SPINLOCKS: stubs; they provide no actual mutual exclusion */

typedef int spinlock_t;

#define spin_lock(a) /* nothing */
#define spin_unlock(a) /* nothing */
#define spin_lock_init(a) /* nothing */

/* MALLOCATION */

#include <stdlib.h>

#define zmalloc(s) calloc(1, s)

#define kmalloc(s, t) malloc(s)
#define kzalloc(s, t) zmalloc(s)
#define kfree(p) free((void *)p)
#define kstrdup(s, t) strdup(s)

/* Defined to 0 so the flag can also appear safely in expressions. */
#define GFP_KERNEL 0

/* PRINTK */

#include <stdio.h>
#define printk(fmt, args...) printf(fmt, ## args)

/* MEMORY BARRIERS */

/*
 * Stubbed out as no-ops: not even compiler barriers. Only safe where the
 * surrounding code tolerates arbitrary reordering.
 */
#define smp_rmb() do {} while (0)
#define smp_wmb() do {} while (0)
#define smp_mb() do {} while (0)
#define smp_mb__after_atomic_inc() do {} while (0)

#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)

/* RCU */

/*
 * There is no grace-period machinery here. rcu_assign_pointer() must still
 * perform the store (as a no-op it would silently drop the assignment);
 * deferred callbacks are dropped, so memory handed to call_rcu_sched()
 * is deliberately leaked.
 */
#define rcu_assign_pointer(p, v) do { (p) = (v); } while (0)
#define call_rcu_sched(a, b) do {} while (0)
#define rcu_barrier_sched() do {} while (0)
#define rcu_read_lock_sched_notrace() do {} while (0)
#define rcu_read_unlock_sched_notrace() do {} while (0)

/* ATOMICITY */

#include <signal.h>

/*
 * Note: these are plain, non-atomic read-modify-write operations; they are
 * not safe against concurrent updates from other threads.
 */
typedef struct { sig_atomic_t counter; } atomic_t;

static inline int atomic_dec_and_test(atomic_t *p)
{
        (p->counter)--;
        return !p->counter;
}

static inline void atomic_set(atomic_t *p, int v)
{
        p->counter = v;
}

static inline void atomic_inc(atomic_t *p)
{
        p->counter++;
}

static inline int atomic_read(atomic_t *p)
{
        return p->counter;
}

#define atomic_long_t atomic_t
#define atomic_long_set atomic_set
#define atomic_long_read atomic_read

#include "asm.h"

#define __xg(x) ((volatile long *)(x))

#define cmpxchg(ptr, o, n)                                              \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),      \
                                       (unsigned long)(n), sizeof(*(ptr))))

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;

        switch (size) {
        case 1:
                asm volatile("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 8:
                /* cmpxchgq requires x86-64 */
                asm volatile("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        /* Unsupported size: nothing was exchanged, although returning old
         * makes the operation look successful to the caller. */
        return old;
}

//#define local_cmpxchg cmpxchg
#define local_cmpxchg(l, o, n) (cmpxchg(&((l)->a.counter), (o), (n)))

#define atomic_long_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
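
/*
 * Usage sketch: atomically replace a value only if it still holds the
 * expected one (the variable is hypothetical):
 *
 *      atomic_long_t v;
 *      atomic_long_set(&v, 1);
 *      long prev = atomic_long_cmpxchg(&v, 1, 2);
 *      // prev == 1 means the swap happened and v now holds 2
 */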


/* LOCAL OPS */

//typedef int local_t;
typedef struct {
        atomic_long_t a;
} local_t;

static inline void local_inc(local_t *l)
{
        (l->a.counter)++;
}

static inline void local_set(local_t *l, int v)
{
        l->a.counter = v;
}

static inline void local_add(int v, local_t *l)
{
        l->a.counter += v;
}

static inline int local_add_return(int v, local_t *l)
{
        return l->a.counter += v;
}

static inline int local_read(local_t *l)
{
        return l->a.counter;
}


/* ATTRIBUTES */

#define ____cacheline_aligned
#define __init
#define __exit

/* MATH */

/* Population count (number of set bits) via parallel bit summation. */
static inline unsigned int hweight32(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x55555555);         /* 2-bit sums */
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);   /* 4-bit sums */
        res = (res + (res >> 4)) & 0x0F0F0F0F;                  /* 8-bit sums */
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
}

/* Find last set bit: 1-based index of the MSB, or 0 if x == 0. */
static inline int fls(int x)
{
        int r;
//ust// #ifdef CONFIG_X86_CMOV
        /* bsrl leaves %0 undefined for x == 0; cmovzl then substitutes -1. */
        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "rm" (-1));
//ust// #else
//ust// asm("bsrl %1,%0\n\t"
//ust//     "jnz 1f\n\t"
//ust//     "movl $-1,%0\n"
//ust//     "1:" : "=r" (r) : "rm" (x));
//ust// #endif
        return r + 1;
}

/* Smallest order such that (1 << order) >= count. */
static inline int get_count_order(unsigned int count)
{
        int order;

        order = fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}
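
/*
 * Worked examples: get_count_order(8) == 3 since 8 is already a power of
 * two; get_count_order(9) == 4 since 9 rounds up to 16.
 */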


#include <unistd.h>

#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
/* As in the kernel: a mask that clears the in-page offset bits. */
#define PAGE_MASK (~(PAGE_SIZE - 1))
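
/*
 * Example, assuming 4096-byte pages: PAGE_ALIGN(1) == 4096 and
 * PAGE_ALIGN(4096) == 4096, i.e. round up to the next page boundary.
 */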


/* ARRAYS */

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* TRACE CLOCK */

#include <sys/time.h> /* gettimeofday(), struct timeval */

//ust// static inline u64 trace_clock_read64(void)
//ust// {
//ust//         uint32_t low;
//ust//         uint32_t high;
//ust//         uint64_t retval;
//ust//         __asm__ volatile ("rdtsc\n" : "=a" (low), "=d" (high));
//ust//
//ust//         retval = high;
//ust//         retval <<= 32;
//ust//         return retval | low;
//ust// }

/* Wall-clock time in microseconds since the epoch. */
static inline u64 trace_clock_read64(void)
{
        struct timeval tv;
        u64 retval;

        gettimeofday(&tv, NULL);
        retval = tv.tv_sec;
        retval *= 1000000;
        retval += tv.tv_usec;

        return retval;
}

/* The clock above ticks in microseconds, hence a 1 MHz nominal frequency. */
static inline u64 trace_clock_frequency(void)
{
        return 1000000LL;
}

static inline u32 trace_clock_freq_scale(void)
{
        return 1;
}
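
/*
 * Example: at the 1 MHz frequency above (scale 1), a timestamp delta of
 * 2000000 ticks spans 2 seconds.
 */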


/* LISTS */

/* Without RCU, the RCU list variants fall back to the plain list ops. */
#define list_add_rcu list_add
#define list_for_each_entry_rcu list_for_each_entry


#define EXPORT_SYMBOL_GPL(a) /* nothing */

/* No meaningful CPU number in userspace; -1 marks it as unavailable. */
#define smp_processor_id() (-1)

#endif /* KERNELCOMPAT_H */