/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#ifndef __always_inline
#define __always_inline inline
#endif

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
#ifdef __ASSEMBLY__
# define stringify_in_c(...)	__VA_ARGS__
# define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...)	#__VA_ARGS__
# define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x)		x##UL
# define ASM_CONST(x)		__ASM_CONST(x)
#endif
/*
 * arch-ppc64.h: Expose PowerPC atomic instructions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#define CACHE_LINE_SIZE 128
#define ____cacheline_internodealigned_in_smp \
        __attribute__((__aligned__(1 << 7)))
/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
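
/*
 * Example (a minimal illustrative sketch; "nthreads" is a hypothetical
 * variable, not part of this header):
 *
 *	atomic_t nthreads = ATOMIC_INIT(0);
 *
 *	atomic_set(&nthreads, 5);
 *	while (atomic_read(&nthreads) > 0)
 *		continue;
 *
 * Note that atomic_read() and atomic_set() provide atomicity only, not
 * ordering; memory barriers must be supplied separately where needed.
 */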
#define PPC405_ERR77(ra,rb)

#ifdef CONFIG_SMP
# define LWSYNC_ON_SMP	stringify_in_c(LWSYNC) "\n"
# define ISYNC_ON_SMP	"\n\tisync\n"
#else
# define LWSYNC_ON_SMP
# define ISYNC_ON_SMP
#endif
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
	case 8:
		return __xchg_u64_local(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })
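
/*
 * Example: a crude test-and-set spinlock sketched on top of xchg()
 * (illustrative only; "tas_lock"/"tas_unlock" are hypothetical names):
 *
 *	unsigned long tas_lock_var = 0;
 *
 *	static void tas_lock(void)
 *	{
 *		while (xchg(&tas_lock_var, 1) != 0)
 *			continue;
 *	}
 *
 *	static void tas_unlock(void)
 *	{
 *		(void)xchg(&tas_lock_var, 0);
 *	}
 *
 * Releasing via xchg() rather than a plain store keeps the release
 * ordered with respect to the critical section on this architecture.
 */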
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
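
/*
 * Example: the canonical cmpxchg() retry loop, here sketched as a
 * lock-free increment of a shared counter (illustrative only):
 *
 *	unsigned long ctr = 0;
 *
 *	unsigned long old, new;
 *	do {
 *		old = ctr;
 *		new = old + 1;
 *	} while (cmpxchg(&ctr, old, new) != old);
 *
 * cmpxchg() returns the value actually found in *ptr, so a return
 * value equal to the expected old value indicates success.
 */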
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES
#define cmpxchg64(ptr, o, n)					\
  ({								\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);			\
	cmpxchg((ptr), (o), (n));				\
  })
#define cmpxchg64_local(ptr, o, n)				\
  ({								\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);			\
	cmpxchg_local((ptr), (o), (n));				\
  })
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic_add - add integer to atomic variable
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v.
 */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
/**
 * atomic_sub - subtract the atomic variable
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v.
 */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub \n\
	subf	%0,%2,%0 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0 \n\
	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
{
	return atomic_sub_return(a, v) == 0;
}
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	atomic_add(1, v);
}
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	atomic_sub(1, v);
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	return atomic_sub_and_test(1, v);
}
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	return atomic_inc_return(v) == 0;
}
/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @a: integer value to add
 *
 * Atomically adds @a to @v and returns @a + @v
 */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return \n\
	add	%0,%1,%0 \n\
	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @a: integer value to add
 *
 * Atomically adds @a to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int a, atomic_t *v)
{
	return atomic_add_return(a, v) < 0;
}
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))
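
/*
 * Example: atomic_inc_not_zero() implements the "acquire a reference
 * only if the object is still live" idiom (sketch; "struct obj" and
 * the helpers are hypothetical):
 *
 *	struct obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static int obj_tryget(struct obj *op)
 *	{
 *		return atomic_inc_not_zero(&op->refcnt);
 *	}
 *
 *	static void obj_put(struct obj *op)
 *	{
 *		if (atomic_dec_and_test(&op->refcnt))
 *			free(op);
 *	}
 */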
/*
 * Unlike on x86, where atomic operations are already serializing,
 * these barriers must be full smp_mb()s on ppc64.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/time.h>
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
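
/*
 * Example: container_of() maps a pointer to a member back to the
 * enclosing structure (sketch; "struct foo" is hypothetical):
 *
 *	struct foo {
 *		int a;
 *		int b;
 *	};
 *
 *	struct foo f;
 *	int *bp = &f.b;
 *	struct foo *fp = container_of(bp, struct foo, b);
 *
 * After this, fp == &f: the macro subtracts the compile-time offset of
 * the member from the member's run-time address.
 */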
/*
 * Default machine parameters.
 */

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 128
#endif /* #ifndef CACHE_LINE_SIZE */
/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}
#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
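
/*
 * Example: because these map directly onto pthread mutexes, kernel-style
 * locking code carries over unchanged (sketch; "count" and its lock are
 * hypothetical):
 *
 *	DEFINE_SPINLOCK(count_lock);
 *	long count = 0;
 *
 *	static void count_inc(void)
 *	{
 *		spin_lock(&count_lock);
 *		count++;
 *		spin_unlock(&count_lock);
 *	}
 *
 * Note that spin_lock_irqsave() merely records a flag: there are no
 * interrupts to disable at user level.
 */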
/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;
#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
		    ((tid) != __THREAD_ID_MAP_WAITING))
pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;  /* must be non-NULL. */

			if (pthread_setspecific(thread_id_key,
						(void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}
static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)(id - 1);
}
static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}
static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}
static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
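
/*
 * Example: a minimal program skeleton for these primitives (sketch;
 * "thread_main" is hypothetical).  smp_init() must be called first:
 *
 *	void *thread_main(void *arg)
 *	{
 *		printf("hello from thread %d\n", smp_thread_id());
 *		return NULL;
 *	}
 *
 *	int main(int argc, char *argv[])
 *	{
 *		smp_init();
 *		create_thread(thread_main, NULL);
 *		create_thread(thread_main, NULL);
 *		wait_all_threads();
 *		return 0;
 *	}
 */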
static void run_on(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}
/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
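
/*
 * Example: a per-thread counter summed after a run (sketch; "counter"
 * is a hypothetical variable):
 *
 *	DEFINE_PER_THREAD(long, counter);
 *
 *	init_per_thread(counter, 0);	(once, before the threads start)
 *	__get_thread_var(counter)++;	(in each thread, without locking)
 *
 *	long sum = 0;
 *	int t;
 *	for_each_thread(t)
 *		sum += per_thread(counter, t);
 *
 * The CACHE_LINE_SIZE alignment in DEFINE_PER_THREAD() keeps different
 * threads' instances on distinct cache lines, avoiding false sharing.
 */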
/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
	do { \
		int __i_p_c_i; \
		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
			per_cpu(name, __i_p_c_i) = v; \
	} while (0)
/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
/*
 * CPU hotplug.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};
#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#define NOTIFY_DONE		0x0000		/* Don't care */
#define NOTIFY_OK		0x0001		/* Suits me */
#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
						/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)
#define BUG_ON(c) do { if (c) abort(); } while (0)
/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}
/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next);
#endif
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}
/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}
/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}
/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
				const struct list_head *head)
{
	return list->next == head;
}
/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}
/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). E.g. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}
/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}
/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (list_empty(head))
		return;
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}
static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}
/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
				struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head, head->next);
}
/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
				struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head->prev, head);
}
/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}
/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}
/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)
/**
 * list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)
/**
 * __list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
/**
 * list_for_each_prev	-	iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)
/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     prefetch(pos->prev), pos != (head); \
	     pos = n, n = pos->prev)
/**
 * list_for_each_entry	-	iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head); 	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
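
/*
 * Example: embedding a list_head and traversing the result (sketch;
 * "struct dog" is hypothetical):
 *
 *	struct dog {
 *		char name[16];
 *		struct list_head node;
 *	};
 *
 *	LIST_HEAD(dogs);
 *	struct dog fido = { .name = "fido" };
 *	struct dog *dp;
 *
 *	list_add(&fido.node, &dogs);
 *	list_for_each_entry(dp, &dogs, node)
 *		printf("%s\n", dp->name);
 *
 * list_for_each_entry() is list_for_each() plus a list_entry() (that
 * is, container_of()) step mapping each node back to its structure.
 */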
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head); 	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))
/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member)	\
	for (pos = list_entry(pos->member.prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head);	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) 			\
	for (; prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
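
/*
 * Example: the _safe variant permits deletion of the current entry
 * during traversal, here draining a list of heap-allocated entries
 * (reusing the hypothetical "struct dog" from the sketch above):
 *
 *	struct dog *dp, *tmp;
 *
 *	list_for_each_entry_safe(dp, tmp, &dogs, node) {
 *		list_del(&dp->node);
 *		free(dp);
 *	}
 *
 * The cursor for the next iteration is fetched before the body runs,
 * so freeing the current entry cannot derail the traversal.
 */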
/**
 * list_for_each_entry_safe_continue
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) 	\
	for (pos = list_entry(pos->member.next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_from
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) 		\
	for (n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_reverse
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
		n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}
static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)
/**
 * hlist_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
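
/*
 * Example: hlist heads are pointer-sized, which is why they suit large
 * hash tables (sketch; the table, struct, and helper are hypothetical):
 *
 *	#define NBUCKETS 16
 *	struct hlist_head buckets[NBUCKETS];
 *
 *	struct hent {
 *		unsigned long key;
 *		struct hlist_node node;
 *	};
 *
 *	static struct hent *lookup(unsigned long key)
 *	{
 *		struct hent *ep;
 *		struct hlist_node *pos;
 *
 *		hlist_for_each_entry(ep, pos, &buckets[key % NBUCKETS], node)
 *			if (ep->key == key)
 *				return ep;
 *		return NULL;
 *	}
 *
 * Insertion is hlist_add_head(&e->node, &buckets[e->key % NBUCKETS]);
 * deletion is hlist_del(&e->node), which needs only the node itself
 * thanks to the pprev back-pointer.
 */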