#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
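/*
 * Presumably, __uatomic_dummy spans several words so that the "m"
 * operands in the inline asm below are seen as covering the whole
 * object at the target address, preventing the compiler from keeping
 * a stale copy of the value across the atomic operation.
 */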

#define _uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

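/*
 * Usage sketch (illustrative only): a saturating event counter built on
 * the cmpxchg primitive above. The function and constant names below
 * are hypothetical.
 */
#if 0	/* example only */
#define COUNTER_MAX	1024UL

static inline unsigned long saturating_inc(unsigned long *ctr)
{
	unsigned long oldv, newv;

	do {
		oldv = CMM_LOAD_SHARED(*ctr);
		if (oldv >= COUNTER_MAX)
			return oldv;	/* saturated, no update */
		newv = oldv + 1;
		/* retry if another thread updated *ctr in between */
	} while (_uatomic_cmpxchg(ctr, oldv, newv) != oldv);
	return newv;
}
#endif
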
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

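/*
 * Usage sketch (illustrative only): atomically publishing a new message
 * pointer in a single-slot mailbox while retrieving the previous one.
 * The type and function names below are hypothetical.
 */
#if 0	/* example only */
struct msg;

static inline struct msg *publish_msg(struct msg **slot, struct msg *m)
{
	/* Returns the previous message (possibly NULL) for disposal. */
	return _uatomic_xchg(slot, m);
}
#endif
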
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					   \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		   \
						(unsigned long)(v),	   \
						sizeof(*(addr))))

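/*
 * Usage sketch (illustrative only): handing out unique, increasing
 * ticket numbers. xadd leaves the pre-add value in the register, so
 * the macro yields the post-add value; subtracting 1 recovers the
 * caller's ticket. The function name below is hypothetical.
 */
#if 0	/* example only */
static inline unsigned long take_ticket(unsigned long *next_ticket)
{
	return _uatomic_add_return(next_ticket, 1) - 1;
}
#endif
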
/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; andb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; andw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; andl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; andq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_and(addr, v)						   \
	(__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))

/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_or(addr, v)						   \
	(__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))

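/*
 * Usage sketch (illustrative only): setting and clearing bits in a
 * shared flag word with the atomic or/and primitives. The flag and
 * function names below are hypothetical.
 */
#if 0	/* example only */
#define WORKER_BUSY	(1UL << 0)
#define WORKER_SHUTDOWN	(1UL << 1)

static inline void worker_set_busy(unsigned long *flags)
{
	_uatomic_or(flags, WORKER_BUSY);	/* set bit */
}

static inline void worker_clear_busy(unsigned long *flags)
{
	_uatomic_and(flags, ~WORKER_BUSY);	/* clear bit */
}
#endif
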
/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						   \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction: an unsupported size cannot be
	 * caught with linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

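/*
 * Usage sketch (illustrative only): a shared statistics counter using
 * the blind add/inc/dec primitives, which return nothing and compile
 * down to single lock-prefixed instructions. Names below are
 * hypothetical.
 */
#if 0	/* example only */
static unsigned long nr_active;

static inline void session_open(void)
{
	_uatomic_inc(&nr_active);
}

static inline void session_close(void)
{
	_uatomic_dec(&nr_active);
}

static inline void sessions_add(long n)
{
	_uatomic_add(&nr_active, n);
}
#endif
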
#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						\
	((likely(__rcu_cas_avail > 0))					\
	? (_uatomic_##insn)						\
		: ((unlikely(__rcu_cas_avail < 0)			\
			? ((__rcu_cas_init() > 0)			\
				? (_uatomic_##insn)			\
				: (compat_uatomic_##insn))		\
			: (compat_uatomic_##insn))))
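
/*
 * Presumably __rcu_cas_avail is tri-state: it starts out negative
 * (undetermined), and the first UATOMIC_COMPAT() call runs
 * __rcu_cas_init() to detect cmpxchg support, leaving the flag > 0
 * (cmpxchg available, use the fast inline operations) or 0 (fall back
 * on the emulated compat_uatomic_* operations).
 */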

extern unsigned long _compat_uatomic_set(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),	       \
						(unsigned long)(_new), \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_xchg(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),	       \
						(unsigned long)(_new), \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)			       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),	       \
						(unsigned long)(old),  \
						(unsigned long)(_new), \
						sizeof(*(addr))))

extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
#define compat_uatomic_and(addr, v)		       \
	(_compat_uatomic_and((addr),		       \
			(unsigned long)(v),	       \
			sizeof(*(addr))))

extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
#define compat_uatomic_or(addr, v)		       \
	(_compat_uatomic_or((addr),		       \
			(unsigned long)(v),	       \
			sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)			       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),      \
						(unsigned long)(v),    \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)				       \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)				       \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)				       \
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

/*
 * uatomic_read is atomic even in compat mode, so it needs no wrapper;
 * every other operation, including set, goes through UATOMIC_COMPAT().
 */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_and(addr, v)			\
		UATOMIC_COMPAT(and(addr, v))
#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))

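/*
 * Usage sketch (illustrative only): callers use the public wrappers
 * above, e.g. a compare-and-swap retry loop that raises a shared
 * maximum. The function name below is hypothetical.
 */
#if 0	/* example only */
static inline void update_max(unsigned long *max, unsigned long v)
{
	unsigned long old;

	do {
		old = CMM_LOAD_SHARED(*max);
		if (v <= old)
			return;		/* current max already >= v */
	} while (uatomic_cmpxchg(max, old, v) != old);
}
#endif
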
#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */