#ifndef _ARCH_ATOMIC_PPC_H
#define _ARCH_ATOMIC_PPC_H
/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif
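/*
 * Note (added for clarity, not in the original header): the word 0xd00d00
 * does not decode to a valid PowerPC instruction, so executing it raises
 * SIGILL. The helpers below emit it when called with an unsupported
 * operand size.
 */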
#define ILLEGAL_INSTR	".long 0xd00d00"
#ifndef _INCLUDE_API_H
#define atomic_set(addr, v)				\
do {							\
	ACCESS_ONCE(*(addr)) = (v);			\
} while (0)

#define atomic_read(addr)	ACCESS_ONCE(*(addr))
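/*
 * ACCESS_ONCE() is assumed to be supplied by the including API header
 * (typically a volatile cast): it forces exactly one load or store and
 * prevents compiler reordering, but emits no hardware memory barrier.
 */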
/*
 * Using an isync as the second barrier for exchange provides acquire
 * semantics: according to atomic_ops/sysdeps/gcc/powerpc.h, the
 * documentation is "fairly explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but with the comparison removed.
 */
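/*
 * Barrier pattern shared by all helpers below: lwsync before the
 * load-reserve/store-conditional loop orders prior accesses against the
 * update, and isync after the successful store conditional keeps later
 * accesses from executing ahead of it (acquire).
 */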
/* xchg */

static __attribute__((always_inline))
unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}
#define xchg(addr, v)	(__typeof__(*(addr))) _atomic_exchange((addr), (v), \
						sizeof(*(addr)))
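/*
 * Illustrative usage sketch (not part of the original header): a minimal
 * test-and-set spinlock built on xchg(). The name "spin_lock" and the
 * lock layout are hypothetical.
 *
 *	static void spin_lock(int *lock)
 *	{
 *		while (xchg(lock, 1) != 0)
 *			continue;
 *	}
 *
 * xchg() returns the previous value, so the loop exits once a 0 -> 1
 * transition is observed; the trailing isync in _atomic_exchange() gives
 * the successful swap acquire semantics.
 */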
/* cmpxchg */

static __attribute__((always_inline))
unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}
#define cmpxchg(addr, old, _new)					\
	(__typeof__(*(addr))) _atomic_cmpxchg((addr), (old), (_new),	\
					      sizeof(*(addr)))
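/*
 * Illustrative usage sketch (not part of the original header): the classic
 * compare-and-swap retry loop, here as a hypothetical bounded increment.
 *
 *	static int bounded_inc(long *counter, long bound)
 *	{
 *		long old, seen;
 *
 *		do {
 *			old = atomic_read(counter);
 *			if (old >= bound)
 *				return 0;
 *			seen = cmpxchg(counter, old, old + 1);
 *		} while (seen != old);
 *		return 1;
 *	}
 *
 * cmpxchg() returns the value found at the address: equal to "old" on
 * success, the conflicting value otherwise, which restarts the loop.
 */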
/* atomic_add_return */
static __attribute__((always_inline))
unsigned long _atomic_add_return(volatile void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			"lwsync\n"
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker
	 * tricks when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}
#define atomic_add_return(addr, v)				\
	(__typeof__(*(addr))) _atomic_add_return((addr), (v), sizeof(*(addr)))
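/*
 * Illustrative usage sketch (not part of the original header): because the
 * updated value is returned atomically, a hypothetical reference counter
 * can detect the final release without a second read. "free_object()" is
 * a placeholder for whatever reclaims the object.
 *
 *	static void ref_put(long *refcount)
 *	{
 *		if (atomic_add_return(refcount, -1) == 0)
 *			free_object();
 *	}
 */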
/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */
#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))
#define atomic_add(addr, v)		(void)atomic_add_return((addr), (v))
#define atomic_sub(addr, v)		(void)atomic_sub_return((addr), (v))
#define atomic_inc(addr)		atomic_add((addr), 1)
#define atomic_dec(addr)		atomic_add((addr), -1)
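/*
 * The derived operations simply discard the returned value; for example
 * atomic_inc(&x) expands to (void)atomic_add_return(&x, 1), so every
 * variant keeps the barrier semantics of the underlying ll/sc sequence.
 */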
#endif /* #ifndef _INCLUDE_API_H */
#endif /* _ARCH_ATOMIC_PPC_H */