#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

//#include <linux/config.h>
//#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>

/*
 * Memory barriers.
 *
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores from being reordered across this point.
 * rmb() prevents loads from being reordered across this point.
 * wmb() prevents stores from being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads from being
 * reordered across this point (a nop on PPC).
 *
 * We have to use the sync instruction for mb(), since lwsync doesn't
 * order loads with respect to previous stores. lwsync is fine for
 * rmb(), though.
 * For wmb(), we use sync, since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on SMP,
 * since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */

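/*
 * Usage sketch (illustrative only; "desc", "dma_addr" and "doorbell_reg"
 * are hypothetical driver state, not defined by this header). A driver
 * must make its stores to a DMA descriptor in system memory visible
 * before the MMIO write that tells the device to fetch it, which is
 * exactly the ordering wmb() (a full sync) provides:
 *
 *	desc->buf_addr = dma_addr;	// stores to system memory
 *	desc->buf_len = len;
 *	wmb();				// descriptor visible before...
 *	writel(1, doorbell_reg);	// ...the device is kicked
 *
 * If both sides of the barrier were ordinary system memory, the cheaper
 * smp_wmb() (eieio) would suffice, as described above.
 */
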
/*
 * Atomic exchange.
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx %0,0,%3		# __xchg_u32: load old value, reserve\n\
	stwcx. %2,0,%3		# attempt to store new value\n\
	bne- 1b			# lost reservation, retry"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx %0,0,%3		# __xchg_u64: load old value, reserve\n\
	stdcx. %2,0,%3		# attempt to store new value\n\
	bne- 1b			# lost reservation, retry"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32((volatile int *)ptr, x);
	case 8:
		return __xchg_u64((volatile long *)ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr),1))

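/*
 * Usage sketch (illustrative only; "busy_flag" and the spin loop are
 * hypothetical, not part of this header). tas() atomically sets the
 * word to 1 and returns the previous value, so a zero return means
 * this caller acquired the flag:
 *
 *	static volatile int busy_flag;
 *
 *	while (tas(&busy_flag) != 0)
 *		cpu_relax();		// contended: spin until free
 *	// ... critical section ...
 *	mb();				// order the critical section...
 *	busy_flag = 0;			// ...before releasing the flag
 */
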
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int newval)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx %0,0,%2		# __cmpxchg_u32: load old value, reserve\n\
	cmpw 0,%0,%3\n\
	bne- 2f			# mismatch, give up\n\
	stwcx. %4,0,%2		# attempt to store new value\n\
	bne- 1b			# lost reservation, retry"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (newval), "m" (*p)
	: "cc", "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long newval)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx %0,0,%2		# __cmpxchg_u64: load old value, reserve\n\
	cmpd 0,%0,%3\n\
	bne- 2f			# mismatch, give up\n\
	stdcx. %4,0,%2		# attempt to store new value\n\
	bne- 1b			# lost reservation, retry"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (newval), "m" (*p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long newval, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32((volatile int *)ptr, old, newval);
	case 8:
		return __cmpxchg_u64((volatile long *)ptr, old, newval);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

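/*
 * Usage sketch (illustrative only; "counter" and atomic_add_sketch()
 * are hypothetical). The classic cmpxchg() retry loop: re-read the
 * current value and retry until no other CPU has modified it between
 * our read and the store:
 *
 *	static long counter;
 *
 *	static inline void atomic_add_sketch(long n)
 *	{
 *		long old;
 *
 *		do {
 *			old = counter;
 *		} while (cmpxchg(&counter, old, old + n) != old);
 *	}
 */
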
/*
 * We handle most unaligned accesses in hardware. On the other hand,
 * unaligned DMA can be very expensive on some ppc64 IO chips (such a
 * chip does power-of-2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

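/*
 * Sketch of how a network driver consumes NET_IP_ALIGN (illustrative
 * only; the allocation path shown is a typical receive-buffer setup,
 * not something defined here). Since NET_IP_ALIGN is 0 on ppc64, the
 * skb_reserve() below is a no-op and the receive buffer stays
 * naturally aligned for DMA:
 *
 *	skb = dev_alloc_skb(rx_buf_size + NET_IP_ALIGN);
 *	if (skb != NULL)
 *		skb_reserve(skb, NET_IP_ALIGN);
 */
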
#define arch_align_stack(x) (x)

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* __PPC64_SYSTEM_H */