/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#ifndef __always_inline
#define __always_inline inline
#endif /* #ifndef __always_inline */

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)

#ifdef __ASSEMBLY__
# define stringify_in_c(...)    __VA_ARGS__
# define ASM_CONST(x)           x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...)  #__VA_ARGS__
# define stringify_in_c(...)    __stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x)         x##UL
# define ASM_CONST(x)           __ASM_CONST(x)
#endif
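/*
 * Usage sketch (illustrative only, hence the #if 0 guard):
 * stringify_in_c() expands its arguments -- including comma-separated
 * ones -- and then turns them into a C string constant, which is how
 * instruction mnemonics such as LWSYNC get pasted into inline asm
 * below.  EXAMPLE_REG is a hypothetical macro, not part of this API.
 */
#if 0
#define EXAMPLE_REG 5
/* Expands to the C string "li 5, 0 ": */
static const char example_insn[] = stringify_in_c(li EXAMPLE_REG, 0);
#endif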
/*
 * arch-ppc64.h: Expose PowerPC atomic instructions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
/*#define CACHE_LINE_SIZE 128 */
#define ____cacheline_internodealigned_in_smp \
        __attribute__((__aligned__(1 << 7)))

#if 0 /* duplicate with arch_atomic.h */

/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)   ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))

#define PPC405_ERR77(ra,rb)

#ifdef CONFIG_SMP
# define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
# define ISYNC_ON_SMP  "\n\tisync\n"
#else
# define LWSYNC_ON_SMP
# define ISYNC_ON_SMP
#endif
/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
        case 8:
                return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 4:
                return __xchg_u32_local(ptr, x);
        case 8:
                return __xchg_u64_local(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x)                                                          \
  ({                                                                         \
     __typeof__(*(ptr)) _x_ = (x);                                           \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)                                                    \
  ({                                                                         \
     __typeof__(*(ptr)) _x_ = (x);                                           \
     (__typeof__(*(ptr))) __xchg_local((ptr),                                \
                (unsigned long)_x_, sizeof(*(ptr)));                         \
  })
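/*
 * Usage sketch (illustrative only, hence the #if 0 guard): xchg()
 * atomically swaps a new value into a location and hands back the old
 * one, with full barrier semantics on SMP.  The names below are
 * hypothetical, not part of this API.
 */
#if 0
static unsigned int example_flag;

static unsigned int example_test_and_set(void)
{
        /* Returns the previous value; 0 means we got there first. */
        return xchg(&example_flag, 1);
}
#endif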
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG 1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
                        unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
        LWSYNC_ON_SMP
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
                        unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr, o, n)                                               \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,           \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr, o, n)                                         \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,     \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
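/*
 * Usage sketch (illustrative only, hence the #if 0 guard): the classic
 * cmpxchg() retry loop -- read the old value, compute a new one, and
 * retry if some other CPU slipped in an update meanwhile.  The names
 * below are hypothetical, not part of this API.
 */
#if 0
static unsigned int example_count;

static void example_add_bounded(unsigned int delta, unsigned int limit)
{
        unsigned int old, new;

        do {
                old = example_count;
                new = old + delta;
                if (new > limit)
                        new = limit;    /* saturate rather than wrap. */
        } while (cmpxchg(&example_count, old, new) != old);
}
#endif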
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN 0
#define NET_SKB_PAD  L1_CACHE_BYTES

#define cmpxchg64(ptr, o, n)                            \
  ({                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);              \
        cmpxchg((ptr), (o), (n));                       \
  })
#define cmpxchg64_local(ptr, o, n)                      \
  ({                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);              \
        cmpxchg_local((ptr), (o), (n));                 \
  })

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic_add - add integer to atomic variable
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v.
 */
static __inline__ void atomic_add(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_add\n\
        add     %0,%2,%0\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %0,0,%3 \n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (a), "r" (&v->counter)
        : "cc");
}

/**
 * atomic_sub - subtract the atomic variable
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v.
 */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_sub \n\
        subf    %0,%2,%0\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %0,0,%3 \n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (a), "r" (&v->counter)
        : "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %0,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
{
        return atomic_sub_return(a, v) == 0;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        atomic_add(1, v);
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        atomic_sub(1, v);
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        return atomic_sub_and_test(1, v);
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        return atomic_inc_return(v) == 0;
}
/**
 * atomic_add_return - add and return
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns @a + @v
 */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # atomic_add_return \n\
        add     %0,%1,%0\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %0,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}

/**
 * atomic_add_negative - add and test if negative
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int a, atomic_t *v)
{
        return atomic_add_return(a, v) < 0;
}

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%1         # atomic_add_unless\n\
        cmpw    0,%0,%3 \n\
        beq-    2f \n\
        add     %0,%2,%0 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %0,0,%1 \n\
        bne-    1b \n"
        ISYNC_ON_SMP
"       subf    %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

/* Atomic operations are not implicitly serializing on ppc64,
 * so these barriers must be real ones. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
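/*
 * Usage sketch (illustrative only, hence the #if 0 guard):
 * atomic_add_unless() is the building block for conditional reference
 * acquisition -- here a hypothetical "get unless already dead" helper
 * paired with atomic_dec_and_test() on the release side.
 */
#if 0
static atomic_t example_refcount = ATOMIC_INIT(1);

static int example_try_get(void)
{
        /* Fails (returns 0) once the count has reached zero. */
        return atomic_add_unless(&example_refcount, 1, 0);
}

static void example_put(void)
{
        if (atomic_dec_and_test(&example_refcount)) {
                /* Last reference dropped: reclaim the object here. */
        }
}
#endif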
#endif //0 /* duplicate with arch_atomic.h */
/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/time.h>
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})
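/*
 * Usage sketch (illustrative only, hence the #if 0 guard):
 * container_of() maps a pointer to a member back to the enclosing
 * structure.  The struct and helper below are hypothetical.
 */
#if 0
struct example_pair {
        int first;
        int second;
};

static struct example_pair *example_from_second(int *p)
{
        /* Map a pointer to ->second back to the enclosing pair. */
        return container_of(p, struct example_pair, second);
}
#endif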
/*
 * Default machine parameters.
 */

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 128
#endif /* #ifndef CACHE_LINE_SIZE */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
static void spin_lock_init(spinlock_t *sp)
{
        if (pthread_mutex_init(sp, NULL) != 0) {
                perror("spin_lock_init:pthread_mutex_init");
                exit(-1);
        }
}

static void spin_lock(spinlock_t *sp)
{
        if (pthread_mutex_lock(sp) != 0) {
                perror("spin_lock:pthread_mutex_lock");
                exit(-1);
        }
}

static void spin_unlock(spinlock_t *sp)
{
        if (pthread_mutex_unlock(sp) != 0) {
                perror("spin_unlock:pthread_mutex_unlock");
                exit(-1);
        }
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
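/*
 * Usage sketch (illustrative only, hence the #if 0 guard): these
 * wrappers map the kernel's spinlock API onto pthread mutexes, so
 * lock/unlock pairs bracket critical sections exactly as they would
 * in the kernel.  The names below are hypothetical.
 */
#if 0
DEFINE_SPINLOCK(example_lock)   /* macro supplies the trailing ';'. */
int example_protected_data;

static void example_update(int newval)
{
        spin_lock(&example_lock);
        example_protected_data = newval;
        spin_unlock(&example_lock);
}
#endif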
/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
        for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
        for (t = 0; t < NR_THREADS; t++) \
                if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
                    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
        for (t = 0; t < NR_THREADS; t++) \
                if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
                    ((tid) != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;
static int __smp_thread_id(void)
{
        int i;
        thread_id_t tid = pthread_self();

        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == tid) {
                        long v = i + 1;  /* must be non-NULL. */

                        if (pthread_setspecific(thread_id_key,
                                                (void *)v) != 0) {
                                perror("pthread_setspecific");
                                exit(-1);
                        }
                        return i;
                }
        }
        spin_lock(&__thread_id_map_mutex);
        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == tid) {
                        spin_unlock(&__thread_id_map_mutex);
                        return i;
                }
        }
        spin_unlock(&__thread_id_map_mutex);
        fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
                (int)tid, (int)tid);
        exit(-1);
}

static int smp_thread_id(void)
{
        void *id;

        id = pthread_getspecific(thread_id_key);
        if (id == NULL)
                return __smp_thread_id();
        return (long)(id - 1);
}
static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
        thread_id_t tid;
        int i;

        spin_lock(&__thread_id_map_mutex);
        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
                        break;
        }
        if (i >= NR_THREADS) {
                spin_unlock(&__thread_id_map_mutex);
                fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
                exit(-1);
        }
        __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
        spin_unlock(&__thread_id_map_mutex);
        if (pthread_create(&tid, NULL, func, arg) != 0) {
                perror("create_thread:pthread_create");
                exit(-1);
        }
        __thread_id_map[i] = tid;
        return tid;
}
static void *wait_thread(thread_id_t tid)
{
        int i;
        void *vp;

        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == tid)
                        break;
        }
        if (i >= NR_THREADS) {
                fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
                        (int)tid, (int)tid);
                exit(-1);
        }
        if (pthread_join(tid, &vp) != 0) {
                perror("wait_thread:pthread_join");
                exit(-1);
        }
        __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
        return vp;
}
static void wait_all_threads(void)
{
        int i;
        thread_id_t tid;

        for (i = 1; i < NR_THREADS; i++) {
                tid = __thread_id_map[i];
                if (tid != __THREAD_ID_MAP_EMPTY &&
                    tid != __THREAD_ID_MAP_WAITING)
                        (void)wait_thread(tid);
        }
}
static void run_on(int cpu)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        sched_setaffinity(0, sizeof(mask), &mask);
}

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
        struct timeval tv;

        if (gettimeofday(&tv, NULL) != 0)
                abort();
        return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
        struct { \
                __typeof__(type) v \
                        __attribute__((__aligned__(CACHE_LINE_SIZE))); \
        } __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
        do { \
                int __i_p_t_i; \
                for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
                        per_thread(name, __i_p_t_i) = v; \
        } while (0)
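/*
 * Usage sketch (illustrative only, hence the #if 0 guard): a
 * per-thread counter that each thread bumps in its own cache-aligned
 * slot, later summed by the parent.  The names are hypothetical.
 */
#if 0
DEFINE_PER_THREAD(long, example_counter)

static void example_count_event(void)
{
        __get_thread_var(example_counter)++;    /* this thread's slot only. */
}

static long example_read_counters(void)
{
        int t;
        long sum = 0;

        for_each_thread(t)
                sum += per_thread(example_counter, t);
        return sum;
}
#endif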
/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
        for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
        for (cpu = 0; cpu < NR_CPUS; cpu++)

/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
        struct { \
                __typeof__(type) v \
                        __attribute__((__aligned__(CACHE_LINE_SIZE))); \
        } __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
        do { \
                int __i_p_c_i; \
                for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
                        per_cpu(name, __i_p_c_i) = v; \
        } while (0)
/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
struct notifier_block {
        int (*notifier_call)(struct notifier_block *, unsigned long, void *);
        struct notifier_block *next;
        int priority;
};
#define CPU_ONLINE              0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE          0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED         0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE        0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED         0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD                0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING               0x0008 /* CPU (unsigned)v not running any task,
                                        * not handling interrupts, soon dead */
#define CPU_POST_DEAD           0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                        * lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN        0x0010

#define CPU_ONLINE_FROZEN       (CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN  (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN         (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN        (CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE  0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION     0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE      0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND         0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE      0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE         0x0006 /* Restore failed */

#define NOTIFY_DONE             0x0000 /* Don't care */
#define NOTIFY_OK               0x0001 /* Suits me */
#define NOTIFY_STOP_MASK        0x8000 /* Don't call further */
#define NOTIFY_BAD              (NOTIFY_STOP_MASK|0x0002)
                                        /* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP             (NOTIFY_OK|NOTIFY_STOP_MASK)
#define BUG_ON(c) do { if (c) abort(); } while (0)
/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
        int i;

        spin_lock_init(&__thread_id_map_mutex);
        __thread_id_map[0] = pthread_self();
        for (i = 1; i < NR_THREADS; i++)
                __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
        init_per_thread(smp_processor_id, 0);
        if (pthread_key_create(&thread_id_key, NULL) != 0) {
                perror("pthread_key_create");
                exit(-1);
        }
}
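/*
 * Usage sketch (illustrative only, hence the #if 0 guard): the typical
 * lifecycle is smp_init() once from the parent, then create_thread()
 * for each worker, then wait_all_threads() to reap them.
 * example_thread_main is hypothetical.
 */
#if 0
void *example_thread_main(void *arg)
{
        /* ... per-thread work, e.g. per_thread counters ... */
        return NULL;
}

int main(int argc, char *argv[])
{
        int i;

        smp_init();
        for (i = 0; i < 4; i++)
                create_thread(example_thread_main, NULL);
        wait_all_threads();
        return 0;
}
#endif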
/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}
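/*
 * Usage sketch (illustrative only, hence the #if 0 guard): embed a
 * list_head in your own struct, add entries, and walk them with
 * list_for_each_entry() (defined below).  The struct and variable
 * names here are hypothetical.
 */
#if 0
struct example_item {
        int key;
        struct list_head link;
};

LIST_HEAD(example_list);

static void example_enqueue(struct example_item *p)
{
        list_add_tail(&p->link, &example_list); /* FIFO order. */
}

static int example_sum(void)
{
        struct example_item *p;
        int sum = 0;

        list_for_each_entry(p, &example_list, link)
                sum += p->key;
        return sum;
}
#endif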
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
                                        struct list_head *new)
{
        list_replace(old, new);
        INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}
/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
                                const struct list_head *head)
{
        return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}
/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
        return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
                struct list_head *head, struct list_head *entry)
{
        struct list_head *new_first = entry->next;
        list->next = head->next;
        list->next->prev = list;
        list->prev = entry;
        entry->next = list;
        head->next = new_first;
        new_first->prev = head;
}

/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *      and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
                struct list_head *head, struct list_head *entry)
{
        if (list_empty(head))
                return;
        if (list_is_singular(head) &&
                (head->next != entry && head != entry))
                return;
        if (entry == head)
                INIT_LIST_HEAD(list);
        else
                __list_cut_position(list, head, entry);
}
static inline void __list_splice(const struct list_head *list,
                                 struct list_head *prev,
                                 struct list_head *next)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        first->prev = prev;
        prev->next = first;

        last->next = next;
        next->prev = last;
}

/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
                                struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
                                struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head, head->next);
                INIT_LIST_HEAD(list);
        }
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
                                         struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head->prev, head);
                INIT_LIST_HEAD(list);
        }
}
/**
 * list_entry - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
/**
 * list_for_each - iterate over a list
 * @pos:        the &struct list_head to use as a loop cursor.
 * @head:       the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos:        the &struct list_head to use as a loop cursor.
 * @head:       the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos:        the &struct list_head to use as a loop cursor.
 * @head:       the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:        the &struct list_head to use as a loop cursor.
 * @n:          another &struct list_head to use as temporary storage
 * @head:       the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:        the &struct list_head to use as a loop cursor.
 * @n:          another &struct list_head to use as temporary storage
 * @head:       the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; \
             prefetch(pos->prev), pos != (head); \
             pos = n, n = pos->prev)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:        the type * to use as a start point
 * @head:       the head of the list
 * @member:     the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member) \
        for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
        for (; prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:        the type * to use as a loop cursor.
 * @n:          another type * to use as temporary storage
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
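/*
 * Usage sketch (illustrative only, hence the #if 0 guard): the _safe
 * variant keeps a lookahead cursor @n, so the current entry may be
 * deleted and freed mid-iteration.  struct example_item (with its
 * "link" member) is the hypothetical type from the earlier sketch.
 */
#if 0
static void example_release_all(struct list_head *head)
{
        struct example_item *p, *n;

        list_for_each_entry_safe(p, n, head, link) {
                list_del(&p->link);     /* safe: n already points past p. */
                free(p);
        }
}
#endif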
/**
 * list_for_each_entry_safe_continue
 * @pos:        the type * to use as a loop cursor.
 * @n:          another type * to use as temporary storage
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos:        the type * to use as a loop cursor.
 * @n:          another type * to use as temporary storage
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
        for (n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse
 * @pos:        the type * to use as a loop cursor.
 * @n:          another type * to use as temporary storage
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member), \
                n = list_entry(pos->member.prev, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
        h->next = NULL;
        h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                        struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
                                   struct hlist_head *new)
{
        new->first = old->first;
        if (new->first)
                new->first->pprev = &new->first;
        old->first = NULL;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)
/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)                    \
        for (pos = (head)->first;                                        \
             pos && ({ prefetch(pos->next); 1;}) &&                      \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)
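/*
 * Usage sketch (illustrative only, hence the #if 0 guard): an
 * hlist-based hash table bucket -- one pointer per bucket head
 * instead of two.  All of the names below are hypothetical.
 */
#if 0
#define EXAMPLE_NBUCKETS 64

struct example_node {
        int key;
        struct hlist_node chain;
};

struct hlist_head example_table[EXAMPLE_NBUCKETS];

static struct example_node *example_lookup(int key)
{
        struct example_node *tp;
        struct hlist_node *p;

        hlist_for_each_entry(tp, p, &example_table[key % EXAMPLE_NBUCKETS],
                             chain)
                if (tp->key == key)
                        return tp;
        return NULL;
}
#endif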
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)                 \
        for (pos = (pos)->next;                                          \
             pos && ({ prefetch(pos->next); 1;}) &&                      \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)                     \
        for (; pos && ({ prefetch(pos->next); 1;}) &&                    \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @n:          another &struct hlist_node to use as temporary storage
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)            \
        for (pos = (head)->first;                                        \
             pos && ({ n = pos->next; 1; }) &&                           \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)

#endif /* #ifndef _LINUX_LIST_H */