/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#ifndef __always_inline
#define __always_inline inline
#endif

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
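/*
 * Usage sketch (illustration only, not part of the original header):
 * BUILD_BUG_ON() forces a compile-time error when its argument is a
 * nonzero constant, by declaring a negative-sized array.  The structure
 * name below is made up.
 */
#if 0 /* illustration only */
struct example_hdr { int a; int b; };

static inline void example_check_size(void)
{
	/* Compiles only if the structure is exactly two ints in size. */
	BUILD_BUG_ON(sizeof(struct example_hdr) != 2 * sizeof(int));
}
#endif /* illustration only */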
#ifdef __ASSEMBLY__
# define stringify_in_c(...)	__VA_ARGS__
# define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...)	#__VA_ARGS__
# define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x)		x##UL
# define ASM_CONST(x)		__ASM_CONST(x)
#endif
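/*
 * Usage sketch (illustration only): in C code, ASM_CONST() tags a constant
 * as unsigned long, and stringify_in_c() turns arbitrary tokens (commas
 * included) into a string suitable for inline assembly.  The names below
 * are made up.
 */
#if 0 /* illustration only */
#define EXAMPLE_FLAG	ASM_CONST(0x10)	/* expands to 0x10UL when compiled as C */
/* Expands to "movl $1, %eax " (the trailing space comes from the macro). */
static const char example_asm[] = stringify_in_c(movl $1, %eax);
#endif /* illustration only */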
/*
 * arch-i386.h: Expose x86 atomic instructions.  80486 and better only.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, but version 2 only due to inclusion
 * of Linux-kernel code.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */
#define CACHE_LINE_SIZE 64
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << 6)))

#define LOCK_PREFIX "lock ; "

#if 0 /* duplicate with arch_atomic.h */
/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	(void)__sync_fetch_and_add(&v->counter, i);
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	(void)__sync_fetch_and_add(&v->counter, -i);
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, -i) == 0;
}
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	(void)__sync_fetch_and_add(&v->counter, 1);
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	(void)__sync_fetch_and_add(&v->counter, -1);
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, -1) == 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1) == 0;
}
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, i) < 0;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	return __sync_add_and_fetch(&v->counter, i);
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

static inline unsigned int
cmpxchg(volatile long *ptr, long oldval, long newval)
{
	return __sync_val_compare_and_swap(ptr, oldval, newval);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif //0 /* duplicate with arch_atomic.h */
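/*
 * Usage sketch (illustration only; the live definitions come from
 * arch_atomic.h, as noted above): a shared reference count manipulated
 * with the atomic primitives.  Names are made up.
 */
#if 0 /* illustration only */
static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
	atomic_inc(&example_refcount);		/* acquire a reference */
}

static int example_put(void)
{
	/* Returns true when the last reference has been dropped. */
	return atomic_dec_and_test(&example_refcount);
}
#endif /* illustration only */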
/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/time.h>
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
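/*
 * Usage sketch (illustration only): container_of() maps a pointer to a
 * member back to the enclosing structure.  The type and names below are
 * made up.
 */
#if 0 /* illustration only */
struct example_counter {
	int id;
	long count;
};

static inline struct example_counter *counter_from_count(long *countp)
{
	return container_of(countp, struct example_counter, count);
}
#endif /* illustration only */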
/*
 * Default machine parameters.
 */

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 128
#endif /* #ifndef CACHE_LINE_SIZE */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
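/*
 * Usage sketch (illustration only): the pthreads-based "spinlock" is used
 * just like its kernel counterpart.  Names below are made up; the trailing
 * semicolon is supplied by DEFINE_SPINLOCK() itself.
 */
#if 0 /* illustration only */
DEFINE_SPINLOCK(example_lock)
static long example_counter;

static void example_increment(void)
{
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
}
#endif /* illustration only */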
/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;
static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;  /* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)(id - 1);
}
static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}
static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}

static void run_on(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}
/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
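/*
 * Usage sketch (illustration only): a cache-aligned per-thread counter,
 * summed by walking all thread slots.  Names are made up; the trailing
 * semicolon is supplied by DEFINE_PER_THREAD() itself.
 */
#if 0 /* illustration only */
DEFINE_PER_THREAD(long, example_count)

static void example_count_inc(void)
{
	__get_thread_var(example_count)++;	/* this thread's slot */
}

static long example_count_sum(void)
{
	int t;
	long sum = 0;

	for_each_thread(t)
		sum += per_thread(example_count, t);
	return sum;
}
#endif /* illustration only */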
/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)

/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
	do { \
		int __i_p_c_i; \
		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
			per_cpu(name, __i_p_c_i) = v; \
	} while (0)
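/*
 * Usage sketch (illustration only): per-CPU counters initialized once and
 * then summed over all possible CPUs.  Names are made up; note that
 * DEFINE_PER_CPU() does not supply a trailing semicolon.
 */
#if 0 /* illustration only */
DEFINE_PER_CPU(long, example_hits);

static void example_hits_init(void)
{
	init_per_cpu(example_hits, 0);
}

static long example_hits_total(void)
{
	int cpu;
	long sum = 0;

	for_each_possible_cpu(cpu)
		sum += per_cpu(example_hits, cpu);
	return sum;
}
#endif /* illustration only */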
/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#define NOTIFY_DONE		0x0000 /* Don't care */
#define NOTIFY_OK		0x0001 /* Suits me */
#define NOTIFY_STOP_MASK	0x8000 /* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
					/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)
#define BUG_ON(c) do { if (c) abort(); } while (0)
/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}
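/*
 * Usage sketch (illustration only): a minimal program skeleton using the
 * primitives above -- smp_init() first, then create_thread(), then
 * wait_all_threads().  The worker function and its placement policy are
 * made up.
 */
#if 0 /* illustration only */
static void *example_worker(void *arg)
{
	run_on(smp_thread_id());	/* crude CPU placement */
	/* ... per-thread work goes here ... */
	return NULL;
}

static void example_run(int nthreads)
{
	int i;

	smp_init();
	for (i = 0; i < nthreads; i++)
		create_thread(example_worker, NULL);
	wait_all_threads();
}
#endif /* illustration only */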
/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next);
#endif
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
			       const struct list_head *head)
{
	return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}

/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (list_empty(head))
		return;
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}

static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}
/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
			       struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)
/**
 * list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
	     pos = pos->next)

/**
 * __list_for_each	-	iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev	-	iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
	     pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     prefetch(pos->prev), pos != (head); \
	     pos = n, n = pos->prev)

/**
 * list_for_each_entry	-	iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head); 	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
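/*
 * Usage sketch (illustration only): embedding a list_head in a structure,
 * declaring a list head, and walking the entries with
 * list_for_each_entry().  The type and names are made up, and prefetch()
 * must be supplied elsewhere.
 */
#if 0 /* illustration only */
struct example_item {
	int value;
	struct list_head node;
};

LIST_HEAD(example_items);

static int example_sum(void)
{
	struct example_item *p;
	int sum = 0;

	list_for_each_entry(p, &example_items, node)
		sum += p->value;
	return sum;
}
#endif /* illustration only */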
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head); 	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) 		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member)	\
	for (pos = list_entry(pos->member.prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head);	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) 			\
	for (; prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
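/*
 * Usage sketch (illustration only): list_for_each_entry_safe() keeps a
 * second cursor so the current entry may be deleted (and here freed)
 * while iterating.  It reuses the made-up example_item type from the
 * previous sketch.
 */
#if 0 /* illustration only */
static void example_release_all(struct list_head *head)
{
	struct example_item *p, *next;

	list_for_each_entry_safe(p, next, head, node) {
		list_del(&p->node);
		free(p);
	}
}
#endif /* illustration only */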
/**
 * list_for_each_entry_safe_continue
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) 	\
	for (pos = list_entry(pos->member.next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) 		\
	for (n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_reverse
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
		n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head); 					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
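/*
 * Usage sketch (illustration only): a tiny hash table built from
 * hlist_head buckets, using hlist_add_head() for insertion and
 * hlist_for_each_entry() for lookup.  Names and sizes are made up, and
 * prefetch() must be supplied elsewhere, as for the list macros above.
 */
#if 0 /* illustration only */
#define EXAMPLE_HASH_SIZE 16

struct example_obj {
	int key;
	struct hlist_node hash_node;
};

struct hlist_head example_hash[EXAMPLE_HASH_SIZE];

static void example_insert(struct example_obj *obj)
{
	hlist_add_head(&obj->hash_node,
		       &example_hash[obj->key % EXAMPLE_HASH_SIZE]);
}

static struct example_obj *example_lookup(int key)
{
	struct example_obj *obj;
	struct hlist_node *pos;

	hlist_for_each_entry(obj, pos, &example_hash[key % EXAMPLE_HASH_SIZE],
			     hash_node) {
		if (obj->key == key)
			return obj;
	}
	return NULL;
}
#endif /* illustration only */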