#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)
#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("": : :"memory")
#define smp_rmb()	__asm__ __volatile__("": : :"memory")
#define smp_wmb()	__asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
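
/*
 * Illustrative sketch (not part of this header): the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb() defined above.
 * The names demo_data, demo_ready, demo_publish and demo_consume are
 * hypothetical, chosen only for this example.
 */
static inline void demo_publish(int *demo_data, int *demo_ready, int val)
{
	*demo_data = val;
	smp_wmb();		/* order the data store before the flag store */
	*demo_ready = 1;
}

static inline int demo_consume(int *demo_data, int *demo_ready)
{
	if (*demo_ready) {
		smp_rmb();	/* order the flag load before the data load */
		return *demo_data;
	}
	return -1;		/* nothing published yet */
}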
/*
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx %0,0,%3		# __xchg_u32\n\
	stwcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return dummy;
}
static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx %0,0,%3		# __xchg_u64\n\
	stdcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return dummy;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);
static inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })
#define tas(ptr) (xchg((ptr),1))
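
/*
 * Illustrative sketch (not part of this header): tas() as a simple
 * test-and-set acquire attempt.  demo_trylock is a hypothetical name;
 * real kernel code should use the spinlock primitives instead.
 */
static inline int demo_trylock(volatile int *lock)
{
	/* tas() atomically stores 1 into *lock and returns the old
	 * value; seeing 0 means this CPU won the lock. */
	return tas(lock) == 0;
}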
#define __HAVE_ARCH_CMPXCHG	1
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
static inline unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
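
/*
 * Illustrative sketch (not part of this header): the standard
 * compare-and-swap retry loop, here adding to a counter without a
 * lock.  demo_atomic_add is a hypothetical name for this example.
 */
static inline int demo_atomic_add(volatile int *counter, int delta)
{
	int old, new;

	do {
		old = *counter;
		new = old + delta;
		/* cmpxchg() stores new only if *counter still holds old
		 * and returns the value it found, so any mismatch means
		 * another CPU updated the counter first and we retry. */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}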
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
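
/*
 * Illustrative sketch (not part of this header) of how a network
 * driver typically consumes NET_IP_ALIGN; with the value 0 above, the
 * skb_reserve() below is a no-op, so receive DMA stays aligned:
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	skb_reserve(skb, NET_IP_ALIGN);	// shift the IP header; 0 on ppc64
 */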
#define arch_align_stack(x) (x)

#endif /* __PPC64_SYSTEM_H */