/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <asm/atomic.h>
#include <asm/hw_irq.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores from being reordered across this point.
 * rmb() prevents loads from being reordered across this point.
 * wmb() prevents stores from being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads from being
 * reordered across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */

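/*
 * Illustrative sketch (not part of this header): the classic
 * producer/consumer pairing of wmb() and rmb().  The producer must
 * order its payload store before the flag store, and the consumer
 * must order its flag load before the payload load.  All names below
 * (example_data, example_ready, ...) are hypothetical.
 */
#if 0
static int example_data;
static volatile int example_ready;

static inline void example_publish(int v)
{
	example_data = v;		/* 1: write the payload */
	wmb();				/* order the payload store ... */
	example_ready = 1;		/* 2: ... before the flag store */
}

static inline int example_consume(void)
{
	while (!example_ready)		/* spin until the flag is set */
		barrier();
	rmb();				/* order the flag load ... */
	return example_data;		/* ... before the payload load */
}
#endif /* 0 - illustrative only */
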
/*
 * Atomically exchange *p with val: lwarx loads and reserves the word,
 * stwcx. stores conditionally and fails if the reservation was lost,
 * in which case we retry.  Returns the previous value of *p.
 */
static inline unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

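/*
 * Illustrative sketch (not part of this header): xchg_u32() as a
 * test-and-set lock primitive.  Because the previous value is
 * returned, the caller can tell whether it won the race: reading
 * back 0 means the lock was free and is now held.  The helper name
 * example_trylock is hypothetical.
 */
#if 0
static inline int example_trylock(volatile unsigned long *lock)
{
	/* atomically store 1; the old value decides ownership */
	return xchg_u32(lock, 1) == 0;
}
#endif /* 0 - illustrative only */
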
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif	/* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static inline void *xchg_ptr(void *m, void *val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

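/*
 * Illustrative sketch (not part of this header): the size dispatch in
 * __xchg() means the typed xchg() macro works on any 4-byte object,
 * while a mismatched size is caught at link time through
 * __xchg_called_with_bad_pointer().  The names example_state and
 * example_set_state are hypothetical.
 */
#if 0
static unsigned long example_state;

static inline unsigned long example_set_state(unsigned long new_state)
{
	/* swap in the new state; the old state is returned atomically */
	return xchg(&example_state, new_state);
}
#endif /* 0 - illustrative only */
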
#define __HAVE_ARCH_CMPXCHG	1

/*
 * Atomically compare *p with old and, only if they are equal, replace
 * it with new.  Returns the value actually found in *p, so a return
 * value equal to old means the exchange succeeded.
 */
static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#if 0	/* was: #ifdef CONFIG_SMP -- disabled, only one CPU at a time (LTT) */
"	sync\n"
#endif /* CONFIG_SMP */
	"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

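/*
 * Illustrative sketch (not part of this header): __cmpxchg_u32()
 * signals success by returning the old value, which gives the usual
 * compare-and-swap retry loop.  The helper name example_atomic_add
 * is hypothetical.
 */
#if 0
static inline void example_atomic_add(volatile unsigned int *p, unsigned int n)
{
	unsigned int old;

	do {
		old = *p;	/* snapshot the current value */
		/* the store succeeds only if *p still equals old */
	} while (__cmpxchg_u32(p, old, old + n) != old);
}
#endif /* 0 - illustrative only */
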
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

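/*
 * Illustrative sketch (not part of this header): the typed cmpxchg()
 * wrapper in a lock-free "record the maximum" loop.  The names
 * example_high_water and example_record_high are hypothetical.
 */
#if 0
static unsigned long example_high_water;

static inline void example_record_high(unsigned long v)
{
	unsigned long cur;

	do {
		cur = example_high_water;
		if (v <= cur)
			return;		/* current maximum already covers v */
	} while (cmpxchg(&example_high_water, cur, v) != cur);
}
#endif /* 0 - illustrative only */
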
#define arch_align_stack(x) (x)

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* __PPC_SYSTEM_H */