/*
 * compat_arch_x86.c
 *
 * Userspace RCU library - x86 compatibility checks
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <urcu/uatomic.h>

/*
 * Using attribute "weak" for __rcu_cas_avail and
 * __urcu_x86_compat_mutex. These symbols are visible to the entire
 * program, even though many shared objects may carry their own copy.
 * The first version that gets loaded is used by the entire program
 * (executable and all shared objects).
 */

/*
 * It does not really matter if the constructor is called before the
 * library is used, as long as the caller checks whether
 * __rcu_cas_avail < 0 and calls compat_arch_init() explicitly if needed.
 */
int __attribute__((constructor)) __rcu_cas_init(void);
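
/*
 * Hypothetical caller-side sketch of the lazy-init pattern described
 * above; everything other than __rcu_cas_avail and compat_arch_init()
 * is illustrative only:
 *
 *	if (__rcu_cas_avail < 0)
 *		compat_arch_init();
 *	if (__rcu_cas_avail)
 *		... use the native cmpxchg-based fast path ...
 *	else
 *		... fall back to the _compat_uatomic_*() helpers below ...
 */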

/*
 * -1: unknown
 *  1: available
 *  0: unavailable
 */
__attribute__((weak))
int __rcu_cas_avail = -1;

__attribute__((weak))
pthread_mutex_t __urcu_x86_compat_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * get_eflags/set_eflags/compare_and_swap_is_available imported from
 * glibc 2.3.5, linuxthreads/sysdeps/i386/pt-machine.h.
 */
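
/*
 * Rationale: the i486 introduced both the EFLAGS.AC bit (bit 18, mask
 * 0x40000) and the cmpxchg instruction, while the i386 has neither.
 * If the AC bit can be toggled, cmpxchg is therefore available.
 */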

static int get_eflags (void)
{
	int res;
	__asm__ __volatile__ ("pushfl; popl %0" : "=r" (res) : );
	return res;
}

static void set_eflags (int newflags)
{
	__asm__ __volatile__ ("pushl %0; popfl" : : "r" (newflags) : "cc");
}

static int compare_and_swap_is_available (void)
{
	int oldflags = get_eflags ();
	int changed;
	/* Flip AC bit in EFLAGS. */
	set_eflags (oldflags ^ 0x40000);
	/* See if bit changed. */
	changed = (get_eflags () ^ oldflags) & 0x40000;
	/* Restore EFLAGS. */
	set_eflags (oldflags);
	/* If the AC flag did not change, it's a 386 and it lacks cmpxchg.
	   Otherwise, it's a 486 or above and it has cmpxchg. */
	return changed != 0;
}

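/*
 * All signals are blocked while the compat mutex is held: a signal
 * handler interrupting the owner thread could otherwise attempt a
 * compat uatomic operation itself and deadlock on the non-recursive
 * mutex.
 */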
static void mutex_lock_signal_save(pthread_mutex_t *mutex, sigset_t *oldmask)
{
	sigset_t newmask;
	int ret;

	/* Disable signals */
	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, oldmask);
	assert(!ret);
	/* Lock the mutex passed by the caller rather than a hardcoded global */
	ret = pthread_mutex_lock(mutex);
	assert(!ret);
}

static void mutex_lock_signal_restore(pthread_mutex_t *mutex, sigset_t *oldmask)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	assert(!ret);
	ret = pthread_sigmask(SIG_SETMASK, oldmask, NULL);
	assert(!ret);
}

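/*
 * Each emulated operation below serializes on the single global
 * __urcu_x86_compat_mutex, which makes the _compat_uatomic_*() helpers
 * atomic with respect to each other within a single process.
 */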
unsigned long _compat_uatomic_set(void *addr, unsigned long _new, int len)
{
	sigset_t mask;
	unsigned long result;

	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
	switch (len) {
	case 1:
		*(unsigned char *)addr = (unsigned char)_new;
		result = *(unsigned char *)addr;
		break;
	case 2:
		*(unsigned short *)addr = (unsigned short)_new;
		result = *(unsigned short *)addr;
		break;
	case 4:
		*(unsigned int *)addr = (unsigned int)_new;
		result = *(unsigned int *)addr;
		break;
	default:
		/*
		 * generate an illegal instruction. Cannot catch this with
		 * linker tricks when optimizations are disabled.
		 */
		result = 0;
		__asm__ __volatile__("ud2");
	}
	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
	return result;
}

unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
{
	sigset_t mask;
	unsigned long retval;

	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
	switch (len) {
	case 1:
		retval = *(unsigned char *)addr;
		*(unsigned char *)addr = (unsigned char)_new;
		break;
	case 2:
		retval = *(unsigned short *)addr;
		*(unsigned short *)addr = (unsigned short)_new;
		break;
	case 4:
		retval = *(unsigned int *)addr;
		*(unsigned int *)addr = (unsigned int)_new;
		break;
	default:
		/*
		 * generate an illegal instruction. Cannot catch this with
		 * linker tricks when optimizations are disabled.
		 */
		retval = 0; /* silence gcc warnings */
		__asm__ __volatile__("ud2");
	}
	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
	return retval;
}

unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
				      unsigned long _new, int len)
{
	unsigned long retval;
	sigset_t mask;

	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
	switch (len) {
	case 1:
	{
		unsigned char result = *(unsigned char *)addr;
		if (result == (unsigned char)old)
			*(unsigned char *)addr = (unsigned char)_new;
		retval = result;
		break;
	}
	case 2:
	{
		unsigned short result = *(unsigned short *)addr;
		if (result == (unsigned short)old)
			*(unsigned short *)addr = (unsigned short)_new;
		retval = result;
		break;
	}
	case 4:
	{
		unsigned int result = *(unsigned int *)addr;
		if (result == (unsigned int)old)
			*(unsigned int *)addr = (unsigned int)_new;
		retval = result;
		break;
	}
	default:
		/*
		 * generate an illegal instruction. Cannot catch this with
		 * linker tricks when optimizations are disabled.
		 */
		retval = 0; /* silence gcc warnings */
		__asm__ __volatile__("ud2");
	}
	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
	return retval;
}
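
/*
 * A caller could build higher-level atomics on top of this primitive
 * with a classic compare-and-swap retry loop; hypothetical sketch, not
 * part of this file (counter is an illustrative variable name):
 *
 *	unsigned int old, new;
 *	do {
 *		old = *counter;
 *		new = old + 1;
 *	} while (_compat_uatomic_cmpxchg(counter, old, new, 4) != old);
 */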

void _compat_uatomic_or(void *addr, unsigned long v, int len)
{
	sigset_t mask;

	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
	switch (len) {
	case 1:
		*(unsigned char *)addr |= (unsigned char)v;
		break;
	case 2:
		*(unsigned short *)addr |= (unsigned short)v;
		break;
	case 4:
		*(unsigned int *)addr |= (unsigned int)v;
		break;
	default:
		/*
		 * generate an illegal instruction. Cannot catch this with
		 * linker tricks when optimizations are disabled.
		 */
		__asm__ __volatile__("ud2");
	}
	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
}

void _compat_uatomic_and(void *addr, unsigned long v, int len)
{
	sigset_t mask;

	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
	switch (len) {
	case 1:
		*(unsigned char *)addr &= (unsigned char)v;
		break;
	case 2:
		*(unsigned short *)addr &= (unsigned short)v;
		break;
	case 4:
		*(unsigned int *)addr &= (unsigned int)v;
		break;
	default:
		/*
		 * generate an illegal instruction. Cannot catch this with
		 * linker tricks when optimizations are disabled.
		 */
		__asm__ __volatile__("ud2");
	}
	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
}

unsigned long _compat_uatomic_add_return(void *addr, unsigned long v, int len)
{
	sigset_t mask;
	unsigned long result;

	mutex_lock_signal_save(&__urcu_x86_compat_mutex, &mask);
	switch (len) {
	case 1:
		*(unsigned char *)addr += (unsigned char)v;
		result = *(unsigned char *)addr;
		break;
	case 2:
		*(unsigned short *)addr += (unsigned short)v;
		result = *(unsigned short *)addr;
		break;
	case 4:
		*(unsigned int *)addr += (unsigned int)v;
		result = *(unsigned int *)addr;
		break;
	default:
		/*
		 * generate an illegal instruction. Cannot catch this with
		 * linker tricks when optimizations are disabled.
		 */
		result = 0; /* silence gcc warnings */
		__asm__ __volatile__("ud2");
	}
	mutex_lock_signal_restore(&__urcu_x86_compat_mutex, &mask);
	return result;
}

int __rcu_cas_init(void)
{
	if (__rcu_cas_avail < 0)
		__rcu_cas_avail = compare_and_swap_is_available();
	return __rcu_cas_avail;
}