#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instruction for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */
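
/*
 * Usage sketch (illustrative only; data, flag and use() are
 * hypothetical names): the publish/consume pattern the smp_*()
 * barriers exist for.
 *
 * Writer:
 *	data = 42;
 *	smp_wmb();	-- order the data store before the flag store
 *	flag = 1;
 *
 * Reader:
 *	if (flag) {
 *		smp_rmb();	-- order the flag load before the data load
 *		use(data);
 *	}
 *
 * A reader that observes flag == 1 is then guaranteed to also observe
 * data == 42.  On a non-SMP build both barriers reduce to a plain
 * compiler barrier, which is all that is needed there.
 */
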
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%3		# __xchg_u32\n\
	stwcx.	%2,0,%3\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%3		# __xchg_u64\n\
	stdcx.	%2,0,%3\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr),1))
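
/*
 * Usage sketch (illustrative only; toy_lock/toy_unlock are
 * hypothetical, real code should use the spinlock API): tas() is the
 * classic building block for a busy-wait lock.
 *
 *	static inline void toy_lock(volatile int *l)
 *	{
 *		while (tas(l))		-- old value 0 means we took it
 *			while (*l)	-- spin read-only until it looks free
 *				;
 *	}
 *
 *	static inline void toy_unlock(volatile int *l)
 *	{
 *		smp_mb();	-- order the critical section before release
 *		*l = 0;
 *	}
 *
 * The EIEIO_ON_SMP/ISYNC_ON_SMP bracketing in __xchg_u32() gives a
 * successful tas() acquire-style ordering, so accesses in the
 * critical section cannot leak out above it.
 */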

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
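
/*
 * Usage sketch (illustrative only; toy_add_return is a hypothetical
 * name): the usual cmpxchg() retry loop, here adding to a counter
 * without a lock.
 *
 *	static inline int toy_add_return(volatile int *v, int a)
 *	{
 *		int old;
 *
 *		do {
 *			old = *v;
 *		} while (cmpxchg(v, old, old + a) != old);
 *
 *		return old + a;
 *	}
 *
 * cmpxchg() returns the value it found at *v, and only stores when
 * that value still equals old, so the loop retries exactly when
 * another CPU raced with us between the read and the swap.
 */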

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
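
/*
 * For illustration: network drivers typically apply this constant
 * when reserving skb headroom on receive, e.g.
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * On most architectures NET_IP_ALIGN is 2, which 4-byte aligns the IP
 * header behind the 14-byte ethernet header.  Defining it as 0 here
 * keeps the DMA target aligned instead and lets the CPU absorb the
 * misaligned header accesses.
 */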

#define arch_align_stack(x) (x)

#endif /* __PPC64_SYSTEM_H */