#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

//#include <linux/config.h>
//#include <linux/compiler.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("": : :"memory")
#define smp_rmb()	__asm__ __volatile__("": : :"memory")
#define smp_wmb()	__asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
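
/*
 * Illustrative sketch, not part of the original header: one way the
 * smp_wmb()/smp_rmb() pairing described above can be used.  The
 * struct and function names here are hypothetical.
 */
struct example_msg {
	int payload;
	int ready;
};

static inline void example_publish(struct example_msg *m, int val)
{
	m->payload = val;	/* store the data first */
	smp_wmb();		/* order the data store before the flag store */
	m->ready = 1;		/* then set the flag that publishes it */
}

static inline int example_consume(struct example_msg *m, int *out)
{
	if (!m->ready)		/* flag not set yet, nothing to read */
		return 0;
	smp_rmb();		/* order the flag load before the data load */
	*out = m->payload;
	return 1;
}
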
/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx %0,0,%3		# __xchg_u32\n\
	stwcx. %2,0,%3\n\
	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return dummy;
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx %0,0,%3		# __xchg_u64\n\
	stdcx. %2,0,%3\n\
	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return dummy;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
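
/*
 * Illustrative sketch, not part of the original header: tas() above
 * atomically stores 1 and returns the previous value, which is enough
 * for a trivial test-and-set spin lock.  The function names are
 * hypothetical.
 */
static inline void example_spin_lock(volatile int *lock)
{
	while (tas(lock) != 0)
		;		/* previous value was non-zero: held elsewhere, keep spinning */
}

static inline void example_spin_unlock(volatile int *lock)
{
	smp_wmb();		/* order critical-section stores before the release */
	*lock = 0;
}
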
#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
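
/*
 * Illustrative sketch, not part of the original header: a lock-free
 * increment built on cmpxchg() above.  The function name is
 * hypothetical.
 */
static inline int example_lockfree_inc(volatile int *counter)
{
	int old, new;

	do {
		old = *counter;
		new = old + 1;
		/* cmpxchg() stores 'new' only if *counter still holds 'old',
		 * and returns the value it actually found there */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}
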
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define arch_align_stack(x) (x)

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* __PPC64_SYSTEM_H */