#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>

/*
 * Memory barriers.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use sync for mb(), since lwsync doesn't order loads
 * with respect to previous stores.  Lwsync is fine for rmb(), though.
 * For wmb(), we use sync as well, since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)

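/*
 * Illustrative sketch (not part of this header): wmb() as drivers use
 * it, ordering descriptor stores in system memory before the MMIO
 * doorbell write that tells the device to look at them.  'desc',
 * 'dma_handle' and 'doorbell' are hypothetical.
 *
 *	desc->addr = dma_handle;	// publish the descriptor...
 *	desc->len = len;
 *	wmb();				// ...strictly before the doorbell
 *	writel(1, doorbell);		// kick the device
 */
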
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__("" : : : "memory")
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */

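/*
 * Illustrative sketch (not part of this header): the classic pairing
 * of smp_wmb() on the producer with smp_rmb() on the consumer.  'data'
 * and 'flag' are hypothetical shared variables, initially zero.
 *
 *	CPU 0				CPU 1
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 */
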
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

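/*
 * For example, DEBUGGER_BOILERPLATE(debugger) expands to a wrapper
 * that calls through the __debugger hook when one is registered:
 *
 *	static inline int debugger(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger))
 *			return __debugger(regs);
 *		return 0;
 *	}
 */
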
DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#ifdef CONFIG_XMON
extern void xmon_init(void);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_DEBUGGER */

extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
			   int sig);
extern void show_regs(struct pt_regs *regs);
extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
extern int die(const char *str, struct pt_regs *regs, long err);

extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif /* CONFIG_ALTIVEC */

extern int mem_init_done;	/* set on boot once kmalloc can be called */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

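/*
 * Illustrative sketch (not part of this header): how a scheduler-style
 * caller would use switch_to().  When 'prev' is eventually scheduled
 * again, execution resumes after the macro and 'last' names the task
 * that ran immediately before 'prev' got the CPU back.
 *
 *	struct task_struct *last;
 *	switch_to(prev, next, last);
 *	// 'prev' is running again here
 */
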
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

static inline int __is_processor(unsigned long pv)
{
	unsigned long pvr;
	asm("mfspr %0, 0x11F" : "=r" (pvr));	/* 0x11F is SPRN_PVR */
	return PVR_VER(pvr) == pv;
}

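/*
 * Usage sketch (illustrative only; PV_POWER4 is assumed to be one of
 * the PVR version constants from <asm/processor.h>):
 *
 *	if (__is_processor(PV_POWER4))
 *		// ...apply a POWER4-specific workaround...
 */
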
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 *
 * EIEIO_ON_SMP and ISYNC_ON_SMP below come from <asm/memory.h>; they
 * expand to the corresponding barrier instructions on SMP builds and
 * to nothing on UP.
 */
static __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx %0,0,%3		# __xchg_u32\n\
	stwcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx %0,0,%3		# __xchg_u64\n\
	stdcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
({									     \
	__typeof__(*(ptr)) _x_ = (x);					     \
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr),1))

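/*
 * Usage sketch (illustrative only; 'lock' is a hypothetical int used
 * as a test-and-set flag):
 *
 *	while (xchg(&lock, 1) != 0)
 *		cpu_relax();		// spin until we observed 0
 *	// ...critical section...
 *	lock = 0;			// a real unlock also needs a barrier
 */
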
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

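/*
 * Usage sketch (illustrative only; 'counter' is a hypothetical int):
 * a lock-free increment that retries until no other CPU raced with it.
 *
 *	int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */
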
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers-of-2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC64_SYSTEM_H */