/* git blob: a762a5ce8b366b74bbff3333607f776c0d18fbcb */
#ifndef _ARCH_ATOMIC_X86_H
#define _ARCH_ATOMIC_X86_H

/*
 * arch_atomic_x86.h
 *
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)

#ifndef _INCLUDE_API_H
/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */
/*
 * Dummy aggregate wide enough that dereferencing it in an "m" asm
 * constraint tells the compiler the whole accessed object may be
 * read/written by the instruction, preventing unsafe caching of the
 * target memory in registers across the asm.
 */
struct __atomic_dummy {
	unsigned long v[10];
};
/* Turn an arbitrary address into a memory operand for inline asm. */
#define __hp(x)	((struct __atomic_dummy *)(x))
40 static inline __attribute__((always_inline
))
41 unsigned long _atomic_cmpxchg(volatile void *addr
, unsigned long old
,
42 unsigned long _new
, int len
)
47 unsigned char result
= old
;
49 "lock; cmpxchgb %2, %1"
50 : "+a"(result
), "+m"(*__hp(addr
))
51 : "q"((unsigned char)_new
)
57 unsigned short result
= old
;
59 "lock; cmpxchgw %2, %1"
60 : "+a"(result
), "+m"(*__hp(addr
))
61 : "r"((unsigned short)_new
)
67 unsigned int result
= old
;
69 "lock; cmpxchgl %2, %1"
70 : "+a"(result
), "+m"(*__hp(addr
))
71 : "r"((unsigned int)_new
)
75 #if (BITS_PER_LONG == 64)
78 unsigned int result
= old
;
80 "lock; cmpxchgl %2, %1"
81 : "+a"(result
), "+m"(*__hp(addr
))
82 : "r"((unsigned long)_new
)
88 /* generate an illegal instruction. Cannot catch this with linker tricks
89 * when optimizations are disabled. */
90 __asm__
__volatile__("ud2");
94 #define cmpxchg(addr, old, _new) \
95 ((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old),\
96 (unsigned long)(_new), \
101 static inline __attribute__((always_inline
))
102 unsigned long _atomic_exchange(volatile void *addr
, unsigned long val
, int len
)
104 /* Note: the "xchg" instruction does not need a "lock" prefix. */
108 unsigned char result
;
109 __asm__
__volatile__(
111 : "=q"(result
), "+m"(*__hp(addr
))
112 : "0" ((unsigned char)val
)
118 unsigned short result
;
119 __asm__
__volatile__(
121 : "=r"(result
), "+m"(*__hp(addr
))
122 : "0" ((unsigned short)val
)
129 __asm__
__volatile__(
131 : "=r"(result
), "+m"(*__hp(addr
))
132 : "0" ((unsigned int)val
)
136 #if (BITS_PER_LONG == 64)
139 unsigned long result
;
140 __asm__
__volatile__(
142 : "=r"(result
), "+m"(*__hp(addr
))
143 : "0" ((unsigned long)val
)
149 /* generate an illegal instruction. Cannot catch this with linker tricks
150 * when optimizations are disabled. */
151 __asm__
__volatile__("ud2");
155 #define xchg(addr, v) \
156 ((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
161 static inline __attribute__((always_inline
))
162 unsigned long _atomic_add(volatile void *addr
, unsigned long val
, int len
)
167 __asm__
__volatile__(
170 : "q" ((unsigned char)val
));
175 __asm__
__volatile__(
178 : "r" ((unsigned short)val
));
183 __asm__
__volatile__(
186 : "r" ((unsigned int)val
));
189 #if (BITS_PER_LONG == 64)
192 __asm__
__volatile__(
195 : "r" ((unsigned long)val
));
200 /* generate an illegal instruction. Cannot catch this with linker tricks
201 * when optimizations are disabled. */
202 __asm__
__volatile__("ud2");
206 #define atomic_add(addr, v) \
207 (_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_X86_H */
/* (removed: gitweb page-generation footer, not part of the source) */