/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H
#include <asm/atomic.h>
#include <asm/hw_irq.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 *
 * (An illustrative usage sketch follows the SMP barrier definitions
 * below.)
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
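/*
 * Illustrative sketch only (not part of the original header): a
 * hypothetical producer/consumer pair showing where wmb()/rmb()
 * matter.  The names example_data, example_ready, example_produce and
 * example_consume are made up for illustration.
 */
#if 0
static int example_data;
static volatile int example_ready;

static inline void example_produce(int v)
{
	example_data = v;
	wmb();			/* order the data store before the flag store */
	example_ready = 1;
}

static inline int example_consume(void)
{
	while (!example_ready)
		;		/* spin until the producer sets the flag */
	rmb();			/* order the flag load before the data load */
	return example_data;
}
#endif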
static inline unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	/* lwarx/stwcx. retry loop: reload and retry until the
	 * reservation obtained by lwarx survives to the stwcx. */
	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}
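/*
 * Illustrative sketch only (not part of the original header): a minimal
 * test-and-set style lock built on tas(), which atomically stores 1 and
 * returns the previous value via xchg().  example_lock, example_acquire
 * and example_release are hypothetical names.
 */
#if 0
static unsigned long example_lock;	/* 0 = free, 1 = held */

static inline void example_acquire(void)
{
	/* Spin until we are the caller that flipped the lock 0 -> 1. */
	while (tas(&example_lock) != 0)
		;
	mb();		/* keep critical-section accesses after the acquire */
}

static inline void example_release(void)
{
	mb();		/* keep critical-section accesses before the release */
	example_lock = 0;
}
#endif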
#define __HAVE_ARCH_CMPXCHG	1
static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	/* Compare-and-swap: load-reserve, bail out if *p != old,
	 * otherwise store-conditionally the new value and retry if
	 * the reservation was lost. */
	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n\
	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#if 0 //only using one CPU at a time (LTT) // def CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
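/*
 * Illustrative sketch only (not part of the original header): the usual
 * read/modify/retry loop built on cmpxchg(), here as a lock-free
 * increment.  example_atomic_inc is a hypothetical name.
 */
#if 0
static inline unsigned int example_atomic_inc(volatile unsigned int *counter)
{
	unsigned int old, new;

	/* Retry while some other CPU changed *counter between our read
	 * and the cmpxchg(). */
	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);

	return new;
}
#endif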
#define arch_align_stack(x) (x)

#endif /* __PPC_SYSTEM_H */