atomic: provide seq_cst semantics on powerpc
[urcu.git] / compat_arch_x86.c
1 /*
2 * compat_arch_x86.c
3 *
4 * Userspace RCU library - x86 compatibility checks
5 *
6 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <stdio.h>
24 #include <pthread.h>
25 #include <signal.h>
26 #include <assert.h>
27 #include <urcu/uatomic.h>
28
/*
 * It does not really matter if the constructor is called before using
 * the library, as long as the caller checks if __rcu_cas_avail < 0 and calls
 * compat_arch_init() explicitly if needed.
 */
int __attribute__((constructor)) __rcu_cas_init(void);

/*
 * Cached result of the cmpxchg availability probe:
 * -1: unknown
 * 1: available
 * 0: unavailable
 */
int __rcu_cas_avail = -1;

/* Serializes all emulated "atomic" operations in this file. */
static pthread_mutex_t compat_mutex = PTHREAD_MUTEX_INITIALIZER;
44
/*
 * get_eflags/set_eflags/compare_and_swap_is_available imported from glibc
 * 2.3.5. linuxthreads/sysdeps/i386/pt-machine.h.
 */

/*
 * Read the 32-bit EFLAGS register (pushfl pushes it on the stack,
 * popl pops it into a general-purpose register).
 */
static int get_eflags (void)
{
	int res;
	__asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
	return res;
}
56
/*
 * Write newflags into the 32-bit EFLAGS register (push the value,
 * then popfl loads it into EFLAGS). Clobbers condition codes.
 */
static void set_eflags (int newflags)
{
	__asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
}
61
/*
 * Runtime probe for cmpxchg support, imported from glibc 2.3.5
 * linuxthreads/sysdeps/i386/pt-machine.h (see comment above).
 * Returns nonzero when the CPU provides cmpxchg.
 */
static int compare_and_swap_is_available (void)
{
	int orig_flags, probed_flags;

	orig_flags = get_eflags ();
	/* Attempt to flip the AC (alignment check) bit, bit 18 of EFLAGS. */
	set_eflags (orig_flags ^ 0x40000);
	probed_flags = get_eflags ();
	/* Put the caller's EFLAGS back before returning. */
	set_eflags (orig_flags);
	/*
	 * A 386 hardwires the AC bit, so it cannot change there and the
	 * CPU lacks cmpxchg; on a 486 or above the bit flips, and those
	 * CPUs all implement cmpxchg.
	 */
	return ((probed_flags ^ orig_flags) & 0x40000) != 0;
}
76
77 static void mutex_lock_signal_save(pthread_mutex_t *mutex, sigset_t *oldmask)
78 {
79 sigset_t newmask;
80 int ret;
81
82 /* Disable signals */
83 ret = sigemptyset(&newmask);
84 assert(!ret);
85 ret = pthread_sigmask(SIG_SETMASK, &newmask, oldmask);
86 assert(!ret);
87 ret = pthread_mutex_lock(&compat_mutex);
88 assert(!ret);
89 }
90
91 static void mutex_lock_signal_restore(pthread_mutex_t *mutex, sigset_t *oldmask)
92 {
93 int ret;
94
95 ret = pthread_mutex_unlock(&compat_mutex);
96 assert(!ret);
97 ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
98 assert(!ret);
99 }
100
101 unsigned long _compat_uatomic_set(void *addr, unsigned long _new, int len)
102 {
103 sigset_t mask;
104 unsigned long result;
105
106 mutex_lock_signal_save(&compat_mutex, &mask);
107 switch (len) {
108 case 1:
109 *(unsigned char *)addr = (unsigned char)_new;
110 result = *(unsigned char *)addr;
111 break;
112 case 2:
113 *(unsigned short *)addr = (unsigned short)_new;
114 result = *(unsigned short *)addr;
115 break;
116 case 4:
117 *(unsigned int *)addr = (unsigned int)_new;
118 result = *(unsigned int *)addr;
119 break;
120 default:
121 /*
122 * generate an illegal instruction. Cannot catch this with
123 * linker tricks when optimizations are disabled.
124 */
125 result = 0;
126 __asm__ __volatile__("ud2");
127 }
128 mutex_lock_signal_restore(&compat_mutex, &mask);
129 return result;
130 }
131
132 unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
133 {
134 sigset_t mask;
135 unsigned long retval;
136
137 mutex_lock_signal_save(&compat_mutex, &mask);
138 switch (len) {
139 case 1:
140 retval = *(unsigned char *)addr;
141 *(unsigned char *)addr = (unsigned char)_new;
142 break;
143 case 2:
144 retval = *(unsigned short *)addr;
145 *(unsigned short *)addr = (unsigned short)_new;
146 break;
147 case 4:
148 retval = *(unsigned int *)addr;
149 *(unsigned int *)addr = (unsigned int)_new;
150 break;
151 default:
152 /*
153 * generate an illegal instruction. Cannot catch this with
154 * linker tricks when optimizations are disabled.
155 */
156 retval = 0; /* silence gcc warnings */
157 __asm__ __volatile__("ud2");
158 }
159 mutex_lock_signal_restore(&compat_mutex, &mask);
160 return retval;
161 }
162
163 unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
164 unsigned long _new, int len)
165 {
166 unsigned long retval;
167 sigset_t mask;
168
169 mutex_lock_signal_save(&compat_mutex, &mask);
170 switch (len) {
171 case 1:
172 {
173 unsigned char result = *(unsigned char *)addr;
174 if (result == (unsigned char)old)
175 *(unsigned char *)addr = (unsigned char)_new;
176 retval = result;
177 break;
178 }
179 case 2:
180 {
181 unsigned short result = *(unsigned short *)addr;
182 if (result == (unsigned short)old)
183 *(unsigned short *)addr = (unsigned short)_new;
184 retval = result;
185 break;
186 }
187 case 4:
188 {
189 unsigned int result = *(unsigned int *)addr;
190 if (result == (unsigned int)old)
191 *(unsigned int *)addr = (unsigned int)_new;
192 retval = result;
193 break;
194 }
195 default:
196 /*
197 * generate an illegal instruction. Cannot catch this with
198 * linker tricks when optimizations are disabled.
199 */
200 retval = 0; /* silence gcc warnings */
201 __asm__ __volatile__("ud2");
202 }
203 mutex_lock_signal_restore(&compat_mutex, &mask);
204 return retval;
205 }
206
207 void _compat_uatomic_or(void *addr, unsigned long v, int len)
208 {
209 sigset_t mask;
210
211 mutex_lock_signal_save(&compat_mutex, &mask);
212 switch (len) {
213 case 1:
214 *(unsigned char *)addr |= (unsigned char)v;
215 break;
216 case 2:
217 *(unsigned short *)addr |= (unsigned short)v;
218 break;
219 case 4:
220 *(unsigned int *)addr |= (unsigned int)v;
221 break;
222 default:
223 /*
224 * generate an illegal instruction. Cannot catch this with
225 * linker tricks when optimizations are disabled.
226 */
227 __asm__ __volatile__("ud2");
228 }
229 mutex_lock_signal_restore(&compat_mutex, &mask);
230 }
231
232 void _compat_uatomic_and(void *addr, unsigned long v, int len)
233 {
234 sigset_t mask;
235
236 mutex_lock_signal_save(&compat_mutex, &mask);
237 switch (len) {
238 case 1:
239 *(unsigned char *)addr &= (unsigned char)v;
240 break;
241 case 2:
242 *(unsigned short *)addr &= (unsigned short)v;
243 break;
244 case 4:
245 *(unsigned int *)addr &= (unsigned int)v;
246 break;
247 default:
248 /*
249 * generate an illegal instruction. Cannot catch this with
250 * linker tricks when optimizations are disabled.
251 */
252 __asm__ __volatile__("ud2");
253 }
254 mutex_lock_signal_restore(&compat_mutex, &mask);
255 }
256
257 unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
258 {
259 sigset_t mask;
260 unsigned long result;
261
262 mutex_lock_signal_save(&compat_mutex, &mask);
263 switch (len) {
264 case 1:
265 *(unsigned char *)addr += (unsigned char)v;
266 result = *(unsigned char *)addr;
267 break;
268 case 2:
269 *(unsigned short *)addr += (unsigned short)v;
270 result = *(unsigned short *)addr;
271 break;
272 case 4:
273 *(unsigned int *)addr += (unsigned int)v;
274 result = *(unsigned int *)addr;
275 break;
276 default:
277 /*
278 * generate an illegal instruction. Cannot catch this with
279 * linker tricks when optimizations are disabled.
280 */
281 result = 0; /* silence gcc warnings */
282 __asm__ __volatile__("ud2");
283 }
284 mutex_lock_signal_restore(&compat_mutex, &mask);
285 return result;
286 }
287
288 int __rcu_cas_init(void)
289 {
290 if (__rcu_cas_avail < 0)
291 __rcu_cas_avail = compare_and_swap_is_available();
292 return __rcu_cas_avail;
293 }
This page took 0.036639 seconds and 4 git commands to generate.