/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#include <urcu/arch.h>

#ifndef __always_inline
#define __always_inline inline
#endif /* #ifndef __always_inline */

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
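/*
 * Usage sketch (illustrative, not part of the original header): both
 * macros declare a negative-sized array when the condition is true, so
 * a true condition breaks the build instead of failing at run time.
 */
#if 0 /* example only */
static inline void build_bug_example(void)
{
        BUILD_BUG_ON(sizeof(long) < sizeof(int));       /* false: compiles */
        /* BUILD_BUG_ON(sizeof(int) != 4); would fail on an ILP16 target */
}
#endif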
#ifdef __ASSEMBLY__
# define stringify_in_c(...)    __VA_ARGS__
# define ASM_CONST(x)           x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...)  #__VA_ARGS__
# define stringify_in_c(...)    __stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x)         x##UL
# define ASM_CONST(x)           __ASM_CONST(x)
#endif /* #ifdef __ASSEMBLY__ */
/*
 * arch-ppc64.h: Expose PowerPC atomic instructions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
/* #define CACHE_LINE_SIZE 128 */
#define ____cacheline_internodealigned_in_smp \
        __attribute__((__aligned__(1 << 7)))
#if 0 /* duplicate with arch_atomic.h */

/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))
#define PPC405_ERR77(ra,rb)

#ifdef CONFIG_SMP
# define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
# define ISYNC_ON_SMP "\n\tisync\n"
#else
# define LWSYNC_ON_SMP
# define ISYNC_ON_SMP
#endif /* #ifdef CONFIG_SMP */
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}
/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
        case 8:
                return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}
static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 4:
                return __xchg_u32_local(ptr, x);
        case 8:
                return __xchg_u64_local(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}
#define xchg(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg_local((ptr), \
                (unsigned long)_x_, sizeof(*(ptr))); \
  })
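/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically swaps in a new value and hands back the old one, here used
 * as a simple test-and-set spinlock flag.
 */
#if 0 /* example only */
static int example_lock_word;

static inline void example_lock_acquire(void)
{
        while (xchg(&example_lock_word, 1) != 0)
                continue;       /* spin until the previous value was 0 */
}

static inline void example_lock_release(void)
{
        (void)xchg(&example_lock_word, 0);
}
#endif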
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG     1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
                        unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
        LWSYNC_ON_SMP
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
                        unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}
static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}
#define cmpxchg(ptr, o, n) \
  ({ \
     __typeof__(*(ptr)) _o_ = (o); \
     __typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr, o, n) \
  ({ \
     __typeof__(*(ptr)) _o_ = (o); \
     __typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
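/*
 * Usage sketch (illustrative, not part of the original header): the
 * classic compare-and-swap retry loop.  cmpxchg() returns the value it
 * observed, so the update succeeded exactly when that observation
 * matches the expected old value.
 */
#if 0 /* example only */
static unsigned long example_counter;

static inline void example_counter_add(unsigned long delta)
{
        unsigned long old, seen;

        do {
                old = example_counter;
                seen = cmpxchg(&example_counter, old, old + delta);
        } while (seen != old);  /* lost a race: retry with the fresh value */
}
#endif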
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN    0
#define NET_SKB_PAD     L1_CACHE_BYTES
#define cmpxchg64(ptr, o, n) \
  ({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        cmpxchg((ptr), (o), (n)); \
  })
#define cmpxchg64_local(ptr, o, n) \
  ({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        cmpxchg_local((ptr), (o), (n)); \
  })
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic_add - add integer to atomic variable
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v.
 */
static __inline__ void atomic_add(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_add\n\
        add     %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (a), "r" (&v->counter)
        : "cc");
}
/**
 * atomic_sub - subtract integer from atomic variable
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v.
 */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_sub \n\
        subf    %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (a), "r" (&v->counter)
        : "cc");
}
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @a: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @a from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
{
        return atomic_sub_return(a, v) == 0;
}
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        atomic_add(1, v);
}
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        atomic_sub(1, v);
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        return atomic_sub_and_test(1, v);
}
static __inline__ int atomic_add_return(int a, atomic_t *v); /* defined below */

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        return atomic_add_return(1, v) == 0;
}
/**
 * atomic_add_return - add and return
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns @a + @v.
 */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # atomic_add_return \n\
        add     %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}
/**
 * atomic_add_negative - add and test if negative
 * @a: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int a, atomic_t *v)
{
        return atomic_add_return(a, v) < 0;
}
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%1         # atomic_add_unless\n\
        cmpw    0,%0,%3\n\
        beq-    2f\n\
        add     %0,%2,%0\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        ISYNC_ON_SMP
"       subf    %0,%2,%0\n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))
/* Atomic RMW operations are not fully ordered on PowerPC, so these map
 * to full memory barriers. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec()  smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc()  smp_mb()
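/*
 * Usage sketch (illustrative, not part of the original header): a
 * reference count built on the primitives above, including the
 * atomic_inc_not_zero() idiom for taking a reference only while the
 * object is still live.  example_obj_release() is a hypothetical hook.
 */
#if 0 /* example only */
struct example_obj {
        atomic_t refcnt;        /* initialized with ATOMIC_INIT(1) */
};

static inline int example_obj_get(struct example_obj *p)
{
        return atomic_inc_not_zero(&p->refcnt); /* fails once count hits 0 */
}

static inline void example_obj_put(struct example_obj *p)
{
        if (atomic_dec_and_test(&p->refcnt))
                example_obj_release(p);         /* hypothetical release hook */
}
#endif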
#endif /* #if 0: duplicate with arch_atomic.h */
/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */
/* Headers required by the primitives below. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */
#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})
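/*
 * Usage sketch (illustrative, not part of the original header):
 * container_of() maps a pointer to an embedded member back to the
 * enclosing structure.  struct example_item is hypothetical.
 */
#if 0 /* example only */
struct example_item {
        int key;
        struct list_head node;  /* embedded member, defined later in this file */
};

static inline struct example_item *example_item_of(struct list_head *n)
{
        return container_of(n, struct example_item, node);
}
#endif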
/*
 * Default machine parameters.
 */

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 128 /* needed by the aligned per-thread/per-CPU macros below */
#endif /* #ifndef CACHE_LINE_SIZE */
/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
static void spin_lock_init(spinlock_t *sp)
{
        if (pthread_mutex_init(sp, NULL) != 0) {
                perror("spin_lock_init:pthread_mutex_init");
                exit(-1);
        }
}

static void spin_lock(spinlock_t *sp)
{
        if (pthread_mutex_lock(sp) != 0) {
                perror("spin_lock:pthread_mutex_lock");
                exit(-1);
        }
}

static void spin_unlock(spinlock_t *sp)
{
        if (pthread_mutex_unlock(sp) != 0) {
                perror("spin_unlock:pthread_mutex_unlock");
                exit(-1);
        }
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
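/*
 * Usage sketch (illustrative, not part of the original header): the
 * pthreads-backed spinlock API mirrors the kernel calling convention;
 * the "flags" argument of the irqsave variants is accepted but carries
 * no interrupt state in this userspace mapping.
 */
#if 0 /* example only */
DEFINE_SPINLOCK(example_lock)
static int example_shared;

static void example_update(void)
{
        int flags;

        spin_lock_irqsave(&example_lock, flags);
        example_shared++;
        spin_unlock_irqrestore(&example_lock, flags);
}
#endif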
/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
        for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
        for (t = 0; t < NR_THREADS; t++) \
                if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
                    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
        for (t = 0; t < NR_THREADS; t++) \
                if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
                    ((tid) != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;
static int __smp_thread_id(void)
{
        int i;
        thread_id_t tid = pthread_self();

        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == tid) {
                        long v = i + 1;  /* must be non-NULL. */

                        if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
                                perror("pthread_setspecific");
                                exit(-1);
                        }
                        return i;
                }
        }
        spin_lock(&__thread_id_map_mutex);
        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == tid) {
                        spin_unlock(&__thread_id_map_mutex);
                        return i;
                }
        }
        spin_unlock(&__thread_id_map_mutex);
        fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
                (int)tid, (int)tid);
        exit(-1);
}
static int smp_thread_id(void)
{
        void *id;

        id = pthread_getspecific(thread_id_key);
        if (id == NULL)
                return __smp_thread_id();
        return (long)(id - 1);
}
static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
        thread_id_t tid;
        int i;

        spin_lock(&__thread_id_map_mutex);
        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
                        break;
        }
        if (i >= NR_THREADS) {
                spin_unlock(&__thread_id_map_mutex);
                fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
                exit(-1);
        }
        __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
        spin_unlock(&__thread_id_map_mutex);
        if (pthread_create(&tid, NULL, func, arg) != 0) {
                perror("create_thread:pthread_create");
                exit(-1);
        }
        __thread_id_map[i] = tid;
        return tid;
}
static void *wait_thread(thread_id_t tid)
{
        int i;
        void *vp;

        for (i = 0; i < NR_THREADS; i++) {
                if (__thread_id_map[i] == tid)
                        break;
        }
        if (i >= NR_THREADS) {
                fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
                        (int)tid, (int)tid);
                exit(-1);
        }
        if (pthread_join(tid, &vp) != 0) {
                perror("wait_thread:pthread_join");
                exit(-1);
        }
        __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
        return vp;
}
static void wait_all_threads(void)
{
        int i;
        thread_id_t tid;

        for (i = 1; i < NR_THREADS; i++) {
                tid = __thread_id_map[i];
                if (tid != __THREAD_ID_MAP_EMPTY &&
                    tid != __THREAD_ID_MAP_WAITING)
                        (void)wait_thread(tid);
        }
}
static void run_on(int cpu)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        sched_setaffinity(0, sizeof(mask), &mask);
}
/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
        struct timeval tv;

        if (gettimeofday(&tv, NULL) != 0)
                abort();
        return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
        struct { \
                __typeof__(type) v \
                        __attribute__((__aligned__(CACHE_LINE_SIZE))); \
        } __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
        do { \
                int __i_p_t_i; \
                for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
                        per_thread(name, __i_p_t_i) = v; \
        } while (0)
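/*
 * Usage sketch (illustrative, not part of the original header):
 * per-thread counters that are summed at the end of a run, the pattern
 * these macros exist to support.
 */
#if 0 /* example only */
DEFINE_PER_THREAD(long, example_count);

static void example_count_init(void)
{
        init_per_thread(example_count, 0);
}

static long example_count_sum(void)
{
        int t;
        long sum = 0;

        for_each_thread(t)
                sum += per_thread(example_count, t);
        return sum;
}
#endif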
/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
        for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
        for (cpu = 0; cpu < NR_CPUS; cpu++)
/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
        struct { \
                __typeof__(type) v \
                        __attribute__((__aligned__(CACHE_LINE_SIZE))); \
        } __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
        do { \
                int __i_p_c_i; \
                for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
                        per_cpu(name, __i_p_c_i) = v; \
        } while (0)
/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
struct notifier_block {
        int (*notifier_call)(struct notifier_block *, unsigned long, void *);
        struct notifier_block *next;
        int priority;   /* as in the kernel struct this is copied from */
};
#define CPU_ONLINE              0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE          0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED         0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE        0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED         0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD                0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING               0x0008 /* CPU (unsigned)v not running any task,
                                        * not handling interrupts, soon dead */
#define CPU_POST_DEAD           0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                        * lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN        0x0010

#define CPU_ONLINE_FROZEN       (CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN  (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN         (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN        (CPU_DYING | CPU_TASKS_FROZEN)
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE  0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION     0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE      0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND         0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE      0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE         0x0006 /* Restore failed */

#define NOTIFY_DONE             0x0000 /* Don't care */
#define NOTIFY_OK               0x0001 /* Suits me */
#define NOTIFY_STOP_MASK        0x8000 /* Don't call further */
#define NOTIFY_BAD              (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP             (NOTIFY_OK|NOTIFY_STOP_MASK)
#define BUG_ON(c) do { if (c) abort(); } while (0) /* fires when c is true, as in the kernel */
/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
        int i;

        spin_lock_init(&__thread_id_map_mutex);
        __thread_id_map[0] = pthread_self();
        for (i = 1; i < NR_THREADS; i++)
                __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
        init_per_thread(smp_processor_id, 0);
        if (pthread_key_create(&thread_id_key, NULL) != 0) {
                perror("pthread_key_create");
                exit(-1);
        }
}
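/*
 * Usage sketch (illustrative, not part of the original header): the
 * intended lifecycle is smp_init() first, then create_thread() for each
 * worker, then wait_all_threads().  example_worker is hypothetical.
 */
#if 0 /* example only */
static void *example_worker(void *arg)
{
        run_on(smp_thread_id());        /* optionally pin to a CPU */
        return NULL;
}

static void example_run(int nworkers)
{
        int i;

        smp_init();
        for (i = 0; i < nworkers; i++)
                (void)create_thread(example_worker, NULL);
        wait_all_threads();
}
#endif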
/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#ifndef container_of
#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})
#endif

#ifndef prefetch
#define prefetch(x) ((void)(x)) /* fallback so the iterators below compile without an arch prefetch */
#endif
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next);
#endif
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
                                        struct list_head *new)
{
        list_replace(old, new);
        INIT_LIST_HEAD(old);
}
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}
/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
                                const struct list_head *head)
{
        return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
        return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
                struct list_head *head, struct list_head *entry)
{
        struct list_head *new_first = entry->next;
        list->next = head->next;
        list->next->prev = list;
        list->prev = entry;
        entry->next = list;
        head->next = new_first;
        new_first->prev = head;
}
/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 */
static inline void list_cut_position(struct list_head *list,
                struct list_head *head, struct list_head *entry)
{
        if (list_empty(head))
                return;
        if (list_is_singular(head) &&
                (head->next != entry && head != entry))
                return;
        if (entry == head)
                INIT_LIST_HEAD(list);
        else
                __list_cut_position(list, head, entry);
}
static inline void __list_splice(const struct list_head *list,
                                 struct list_head *prev,
                                 struct list_head *next)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        first->prev = prev;
        prev->next = first;

        last->next = next;
        next->prev = last;
}
/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
                                struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
                                struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head, head->next);
                INIT_LIST_HEAD(list);
        }
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
                                         struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head->prev, head);
                INIT_LIST_HEAD(list);
        }
}
/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
/**
 * list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)
/**
 * list_for_each_prev - iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; \
             prefetch(pos->prev), pos != (head); \
             pos = n, n = pos->prev)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
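/*
 * Usage sketch (illustrative, not part of the original header): a struct
 * embeds a list_head, entries are pushed with list_add(), and
 * list_for_each_entry() walks the typed entries directly.
 */
#if 0 /* example only */
struct example_node {
        int val;
        struct list_head link;
};

static int example_sum(struct list_head *head)
{
        struct example_node *p;
        int sum = 0;

        list_for_each_entry(p, head, link)
                sum += p->val;
        return sum;
}
#endif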
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member) \
        for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
        for (; prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
        for (n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_reverse
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member), \
                n = list_entry(pos->member.prev, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
        h->next = NULL;
        h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                        struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
                                   struct hlist_head *new)
{
        new->first = old->first;
        if (new->first)
                new->first->pprev = &new->first;
        old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)
/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
        for (pos = (pos)->next; \
             pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
        for (; pos && ({ prefetch(pos->next); 1;}) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
        for (pos = (head)->first; \
             pos && ({ n = pos->next; 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)
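/*
 * Usage sketch (illustrative, not part of the original header): hlist
 * heads are a single pointer, so they suit hash-table buckets; the extra
 * "pos" cursor in the entry iterators is the raw hlist_node.
 */
#if 0 /* example only */
struct example_hnode {
        int key;
        struct hlist_node link;
};

static struct example_hnode *example_lookup(struct hlist_head *bucket, int key)
{
        struct example_hnode *tp;
        struct hlist_node *pos;

        hlist_for_each_entry(tp, pos, bucket, link) {
                if (tp->key == key)
                        return tp;
        }
        return NULL;
}
#endif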