#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))
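
/*
 * Example usage (illustrative sketch; 'count' is a hypothetical variable):
 * uatomic_set()/uatomic_read() are single, tear-free accesses with no
 * ordering guarantees of their own.
 *
 *	static unsigned long count;
 *
 *	uatomic_set(&count, 0);
 *	unsigned long v = uatomic_read(&count);
 */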

/*
 * Using an isync as the second barrier for exchange, to provide acquire
 * semantics. According to uatomic_ops/sysdeps/gcc/powerpc.h, the
 * documentation is "fairly explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
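
/*
 * Example usage (illustrative sketch; 'head' and 'struct node' are
 * hypothetical): uatomic_xchg() atomically stores the new value and returns
 * the previous one, e.g. to grab an entire lock-free list in one step:
 *
 *	struct node *head;
 *
 *	struct node *mine = uatomic_xchg(&head, NULL);
 *	// 'mine' owns the old list; concurrent writers now see NULL.
 */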

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned int)_new),
			  "r"((unsigned int)old)
			: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned long)_new),
			  "r"((unsigned long)old)
			: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				    \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	    \
						sizeof(*(addr))))
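
/*
 * Example usage (illustrative sketch; 'flags' is a hypothetical variable):
 * uatomic_cmpxchg() returns the value observed before the attempted store,
 * so the classic retry loop compares the return value against 'old':
 *
 *	static unsigned long flags;
 *	unsigned long old, seen;
 *
 *	do {
 *		old = uatomic_read(&flags);
 *		seen = uatomic_cmpxchg(&flags, old, old | 0x1);
 *	} while (seen != old);	// another thread raced us; retry
 */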

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						(unsigned long)(v),	\
						sizeof(*(addr))))
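
/*
 * Example usage (illustrative sketch; 'next_ticket' is a hypothetical
 * variable): uatomic_add_return() yields the value *after* the addition,
 * which makes it suitable for handing out unique tickets:
 *
 *	static unsigned long next_ticket;
 *
 *	unsigned long mine = uatomic_add_return(&next_ticket, 1);
 */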

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)
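
/*
 * Example usage (illustrative sketch; 'refcount' is a hypothetical
 * variable): the derived operations above all funnel into
 * _uatomic_add_return(), e.g. for reference counting:
 *
 *	static unsigned long refcount;
 *
 *	uatomic_inc(&refcount);				// take a reference
 *	if (uatomic_sub_return(&refcount, 1) == 0) {
 *		// last reference dropped; reclaim is safe here
 *	}
 */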

#define compat_uatomic_cmpxchg(ptr, old, _new)	uatomic_cmpxchg(ptr, old, _new)

#endif /* _URCU_ARCH_UATOMIC_PPC_H */