0bc64be5 |
1 | /* |
2 | * PowerPC64 atomic operations |
3 | * |
4 | * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM |
5 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM |
6 | * |
7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License |
9 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. |
11 | */ |
12 | |
13 | #ifndef _ASM_PPC64_ATOMIC_H_ |
14 | #define _ASM_PPC64_ATOMIC_H_ |
15 | |
16 | #include <asm/memory.h> |
17 | |
/*
 * 32-bit atomic counter.  'volatile' forces the compiler to perform a
 * real memory access on every read/write rather than caching the value
 * in a register.
 */
typedef struct { volatile int counter; } atomic_t;

/* Static initializer: atomic_t v = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }

/*
 * Plain load/store of the counter.  Atomic for an aligned 32-bit word,
 * but these imply NO memory barriers and no lwarx/stwcx. sequence.
 */
#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
24 | |
/*
 * Atomically add @a to @v->counter.  No return value and no memory
 * barriers, so this gives no ordering guarantees with respect to
 * surrounding loads/stores.
 *
 * lwarx (load word and reserve) / stwcx. (store word conditional) form
 * a load-reserve/store-conditional pair; stwcx. fails and the bne-
 * retries from 1: if another CPU touched the reservation granule.
 * v->counter appears both as "=m" output and "m" input so GCC knows the
 * memory word is both read and written by the asm.
 */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
38 | |
/*
 * Atomically add @a to @v->counter and return the NEW value.
 *
 * Unlike atomic_add(), the *_return variants are ordering points:
 * EIEIO_ON_SMP is emitted before and ISYNC_ON_SMP after the ll/sc loop
 * (both come from asm/memory.h, not visible here — presumably they
 * expand to nothing on UP builds; confirm there).  The "memory" clobber
 * stops GCC from caching memory values across the operation.
 *
 * NOTE(review): later PowerPC kernels replaced eieio with lwsync in
 * these barriers because eieio does not order cacheable loads against
 * stores — worth confirming this tree's ordering requirements.
 */
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
56 | |
/* True if v->counter is negative after atomically adding @a to it. */
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
58 | |
/*
 * Atomically subtract @a from @v->counter.  No return value and no
 * memory barriers (see atomic_add()).
 *
 * subf %0,%2,%0 computes %0 = %0 - %2, i.e. counter - a.
 */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
72 | |
/*
 * Atomically subtract @a from @v->counter and return the NEW value.
 * Barrier semantics as in atomic_add_return(): EIEIO_ON_SMP before,
 * ISYNC_ON_SMP after, plus a "memory" clobber.
 */
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
90 | |
/*
 * Atomically increment @v->counter by 1.  No return value, no barriers.
 *
 * addic is used rather than addi because addi treats a source register
 * of r0 as the literal 0 (per the PowerPC ISA); addic always reads the
 * register, so the asm is safe whichever register GCC allocates for %0.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
104 | |
/*
 * Atomically increment @v->counter and return the NEW value.
 * Barrier semantics as in atomic_add_return().
 */
static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
122 | |
123 | /* |
124 | * atomic_inc_and_test - increment and test |
125 | * @v: pointer of type atomic_t |
126 | * |
127 | * Atomically increments @v by 1 |
128 | * and returns true if the result is zero, or false for all |
129 | * other cases. |
130 | */ |
131 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) |
132 | |
/*
 * Atomically decrement @v->counter by 1.  No return value, no barriers.
 * Implemented as addic with immediate -1 (see atomic_inc() for why
 * addic rather than addi).
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
146 | |
/*
 * Atomically decrement @v->counter and return the NEW value.
 * Barrier semantics as in atomic_add_return().
 */
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
164 | |
/* True if the counter reaches zero after the atomic sub/dec. */
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
167 | |
168 | /* |
169 | * Atomically test *v and decrement if it is greater than 0. |
170 | * The function returns the old value of *v minus 1. |
171 | */ |
172 | static __inline__ int atomic_dec_if_positive(atomic_t *v) |
173 | { |
174 | int t; |
175 | |
176 | __asm__ __volatile__( |
177 | EIEIO_ON_SMP |
178 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ |
179 | addic. %0,%0,-1\n\ |
180 | blt- 2f\n\ |
181 | stwcx. %0,0,%1\n\ |
182 | bne- 1b" |
183 | ISYNC_ON_SMP |
184 | "\n\ |
185 | 2:" : "=&r" (t) |
186 | : "r" (&v->counter) |
187 | : "cc", "memory"); |
188 | |
189 | return t; |
190 | } |
191 | |
/*
 * The plain inc/dec above contain no barriers, so callers needing
 * ordering around them use these hooks; here they are full smp_mb()
 * barriers (smp_mb() itself is defined elsewhere, not in this file).
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
196 | |
197 | #endif /* _ASM_PPC64_ATOMIC_H_ */ |