/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H
#include <asm/atomic.h>
#include <asm/hw_irq.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
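
/*
 * Illustrative sketch, not part of the original header: a typical use
 * of wmb() when handing a descriptor to a memory-mapped device.  The
 * names sketch_submit, ring_slot and doorbell are hypothetical.  The
 * eieio issued by wmb() orders the two non-cacheable stores, so the
 * device cannot observe the doorbell write before the descriptor write.
 */
#if 0
static void sketch_submit(volatile unsigned long *ring_slot,
			  volatile unsigned long *doorbell,
			  unsigned long desc)
{
	*ring_slot = desc;	/* publish the descriptor */
	wmb();			/* order it before the doorbell store */
	*doorbell = 1;		/* kick the device */
}
#endif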
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
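
/*
 * Illustrative sketch, not part of the original header: smp_wmb() on a
 * producer pairs with smp_rmb() on a consumer, so a CPU that sees the
 * flag set also sees the earlier store to the data.  All names are
 * hypothetical, and the busy-wait is simplified (a real loop would use
 * a volatile access or cpu_relax()).  On !CONFIG_SMP kernels the
 * barriers collapse to compiler barriers, which is all one CPU needs.
 */
#if 0
static int sketch_data, sketch_flag;

static void sketch_producer(void)
{
	sketch_data = 42;
	smp_wmb();		/* order data before flag */
	sketch_flag = 1;
}

static int sketch_consumer(void)
{
	while (!sketch_flag)
		;		/* wait for the producer (simplified) */
	smp_rmb();		/* order flag before data */
	return sketch_data;
}
#endif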
/*
 * Atomically swap the 32-bit word at p with val, returning the old
 * value, via a lwarx/stwcx. reservation loop.
 */
static inline unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}
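
/*
 * Illustrative sketch, not part of the original header: tas() is the
 * classic test-and-set building block.  xchg() returns the value that
 * was previously in the word, so a zero return means this caller
 * changed it from 0 to 1 and owns the (hypothetical) lock word.
 * Simplified: a real lock would also issue a barrier after acquiring.
 */
#if 0
static unsigned long sketch_lock_word;

static void sketch_lock(void)
{
	while (tas(&sketch_lock_word) != 0)
		;		/* spin until the old value was 0 */
}

static void sketch_unlock(void)
{
	mb();			/* order the critical section before release */
	sketch_lock_word = 0;
}
#endif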
#define __HAVE_ARCH_CMPXCHG	1
/*
 * If the 32-bit word at p equals old, atomically replace it with new.
 * The value found at p is returned either way, so the caller can tell
 * whether the store happened.
 */
static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n\
	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#if 0 //only using one CPU at a time (LTT) // def CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
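
/*
 * Illustrative sketch, not part of the original header: the usual
 * cmpxchg() retry loop for a lock-free update of a hypothetical
 * counter.  cmpxchg() returns the value it found at the address, so
 * the store happened only if that equals the value we read.
 */
#if 0
static unsigned int sketch_counter;

static void sketch_add(unsigned int n)
{
	unsigned int old;

	do {
		old = sketch_counter;
	} while (cmpxchg(&sketch_counter, old, old + n) != old);
}
#endif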
#define arch_align_stack(x) (x)
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* __PPC_SYSTEM_H */