#ifndef _ARCH_ATOMIC_PPC_H
#define _ARCH_ATOMIC_PPC_H
/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)

#define ILLEGAL_INSTR	".long	0xd00d00"
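/*
 * 0x00d00d00 does not decode to a valid PowerPC instruction (its primary
 * opcode field is 0, which is reserved), so executing ILLEGAL_INSTR traps
 * with an illegal-instruction exception (SIGILL). It is used below to fail
 * loudly at runtime when an unsupported operand size reaches one of the
 * _atomic_* helpers.
 */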
#ifndef _INCLUDE_API_H
#define atomic_set(addr, v)				\
do {							\
	ACCESS_ONCE(*(addr)) = (v);			\
} while (0)

#define atomic_read(addr)	ACCESS_ONCE(*(addr))
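/*
 * ACCESS_ONCE() is assumed to be provided by the including test harness.
 * A typical definition (an assumption here, not part of this header) is a
 * volatile cast that forces exactly one load or store:
 *
 *	#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))
 */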
/*
 * Using an isync as second barrier for exchange to provide acquire semantics.
 * According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
 * explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */

/* xchg */
static inline __attribute__((always_inline))
unsigned long _atomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");
		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}
#define xchg(addr, v)						\
	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
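/*
 * Example usage (illustrative sketch only; `flag` is a hypothetical
 * variable):
 *
 *	int flag = 0;
 *	int previous = xchg(&flag, 1);	// atomically set flag, get old value
 */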
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _atomic_cmpxchg(void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned int)_new),
			  "r"((unsigned int)old)
			: "memory", "cc");
		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned long)_new),
			  "r"((unsigned long)old)
			: "memory", "cc");
		return old_val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}
#define cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	\
						sizeof(*(addr))))
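/*
 * Example usage (illustrative sketch only; `count` is a hypothetical
 * variable): retry loop that atomically increments only non-zero values.
 *
 *	int count = 1;
 *	int old, seen;
 *
 *	do {
 *		old = atomic_read(&count);
 *		if (!old)
 *			break;			// nothing to do
 *		seen = cmpxchg(&count, old, old + 1);
 *	} while (seen != old);			// lost the race, retry
 */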
/* atomic_add_return */
static inline __attribute__((always_inline))
unsigned long _atomic_add_return(void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");
		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}
#define atomic_add_return(addr, v)				\
	((__typeof__(*(addr))) _atomic_add_return((addr),	\
						  (unsigned long)(v),	\
						  sizeof(*(addr))))
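/*
 * Example usage (illustrative sketch only; `refcount` is a hypothetical
 * variable):
 *
 *	long refcount = 1;
 *	long now = atomic_add_return(&refcount, 1);	// now == 2, atomically
 */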
/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */

#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))

#define atomic_add(addr, v)		(void)atomic_add_return((addr), (v))
#define atomic_sub(addr, v)		(void)atomic_sub_return((addr), (v))

#define atomic_inc(addr)		atomic_add((addr), 1)
#define atomic_dec(addr)		atomic_add((addr), -1)
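/*
 * Example usage of the derived helpers (illustrative sketch only; `hits`
 * is a hypothetical variable):
 *
 *	unsigned long hits = 0;
 *
 *	atomic_inc(&hits);		// hits == 1
 *	atomic_add(&hits, 10);		// hits == 11
 *	if (atomic_sub_return(&hits, 11) == 0)
 *		;			// counter dropped back to zero
 */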
#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_PPC_H */