#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif
/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

/*
 * The __hp() macro casts the void pointer @x to a pointer to a structure
 * containing an array of char of the specified size. This allows passing the
 * @addr arguments of the following inline functions as "m" and "+m" operands
 * to the assembly.
 */

#define __hp(size, x)	((struct { char v[size]; } *)(x))
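/*
 * Illustrative expansion (not part of the original file): for a 4-byte
 * access, *__hp(4, addr) is an lvalue of type struct { char v[4]; },
 * so a constraint such as
 *
 *	"+m"(*__hp(4, addr))
 *
 * tells the compiler exactly which 4 bytes the instruction may read
 * and write, without dereferencing the void pointer itself.
 */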
#define _uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))
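/*
 * Clarifying note (not part of the original file): naturally aligned
 * 1-, 2-, 4- and (on 64-bit) 8-byte stores are already atomic on x86,
 * so _uatomic_set() needs no locked instruction; CMM_STORE_SHARED()
 * performs a volatile store that keeps the compiler from tearing the
 * access or optimizing it away.
 */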
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(len, addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(len, addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(len, addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(len, addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}
#define _uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr),		      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
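/*
 * Usage sketch (illustrative, with a hypothetical flag variable):
 *
 *	static int flag;
 *	...
 *	if (uatomic_cmpxchg(&flag, 0, 1) == 0)
 *		...	(this thread won the race)
 *
 * The macro returns the value observed in memory before the swap, cast
 * back to __typeof__(*(addr)); caa_cast_long_keep_sign() widens the
 * old and new values to unsigned long while preserving their sign.
 */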
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(len, addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(len, addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(len, addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(len, addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}
#define _uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) __uatomic_exchange((addr),		      \
						caa_cast_long_keep_sign(v),   \
						sizeof(*(addr))))
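/*
 * Usage sketch (illustrative, with a hypothetical single-slot mailbox):
 *
 *	static struct msg *mailbox;
 *	...
 *	struct msg *prev = uatomic_xchg(&mailbox, new_msg);
 *
 * On x86, xchg with a memory operand is implicitly locked, which is
 * why the assembly above carries no "lock" prefix.
 */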
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(len, addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(len, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(len, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(len, addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}
#define _uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
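/*
 * Usage sketch (illustrative, with a hypothetical ticket counter):
 *
 *	static unsigned long next_ticket;
 *	...
 *	unsigned long mine = uatomic_add_return(&next_ticket, 1);
 *
 * xadd leaves the pre-add value in the register operand, so the
 * function above adds "val" back in to return the post-add value.
 */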
/* uatomic_and */

static inline __attribute__((always_inline))
void __uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; andb %1, %0"
			: "=m"(*__hp(len, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; andw %1, %0"
			: "=m"(*__hp(len, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; andl %1, %0"
			: "=m"(*__hp(len, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; andq %1, %0"
			: "=m"(*__hp(len, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_and(addr, v)						   \
	(__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(len, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(len, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(len, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(len, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_or(addr, v)						   \
	(__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
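/*
 * Usage sketch (illustrative, with a hypothetical flags word):
 *
 *	uatomic_or(&flags, FLAG_BUSY);		sets the bit
 *	uatomic_and(&flags, ~FLAG_BUSY);	clears the bit
 *
 * Both are atomic read-modify-write operations but return void; use
 * uatomic_cmpxchg() when the previous value is needed.
 */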
/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(len, addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(len, addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(len, addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(len, addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_add(addr, v)						   \
	(__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))
/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(len, addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}
#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						\
	((caa_likely(__rcu_cas_avail > 0))				\
	? (_uatomic_##insn)						\
		: ((caa_unlikely(__rcu_cas_avail < 0)			\
			? ((__rcu_cas_init() > 0)			\
				? (_uatomic_##insn)			\
				: (compat_uatomic_##insn))		\
			: (compat_uatomic_##insn))))
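/*
 * Clarifying note (not part of the original file): __rcu_cas_avail is
 * tri-state. A positive value means the CPU supports cmpxchg, so the
 * inline _uatomic_* fast path is taken; zero means it does not, so the
 * compat_uatomic_* fallback is taken; a negative value means the CPU
 * has not been probed yet, in which case __rcu_cas_init() performs the
 * detection once and its result selects the path.
 */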
/*
 * We leave the return value so we don't break the ABI, but remove the
 * return value from the API.
 */
extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					\
	((void) _compat_uatomic_set((addr),				\
				caa_cast_long_keep_sign(_new),		\
				sizeof(*(addr))))
extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					\
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		\
					caa_cast_long_keep_sign(_new),	\
					sizeof(*(addr))))
extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		       \
					caa_cast_long_keep_sign(old),	       \
					caa_cast_long_keep_sign(_new),	       \
					sizeof(*(addr))))
extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
#define compat_uatomic_and(addr, v)					\
	(_compat_uatomic_and((addr),					\
			caa_cast_long_keep_sign(v),			\
			sizeof(*(addr))))
extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
#define compat_uatomic_or(addr, v)					\
	(_compat_uatomic_or((addr),					\
			caa_cast_long_keep_sign(v),			\
			sizeof(*(addr))))
extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				    \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	    \
					caa_cast_long_keep_sign(v),	    \
					sizeof(*(addr))))
#define compat_uatomic_add(addr, v)					\
		((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					\
		(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					\
		(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif
/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))

#define uatomic_and(addr, v)			\
		UATOMIC_COMPAT(and(addr, v))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define cmm_smp_mb__before_uatomic_inc()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_inc()		cmm_barrier()

#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))
#define cmm_smp_mb__before_uatomic_dec()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_dec()		cmm_barrier()
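/*
 * Clarifying note (not part of the original file): the lock-prefixed
 * instructions above already act as full memory barriers on x86, so
 * the cmm_smp_mb__before/after_uatomic_*() macros only need to prevent
 * compiler reordering, which is all cmm_barrier() does.
 */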
#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */