#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

#define ILLEGAL_INSTR	".long	0xd00d00"

/*
 * Providing sequential consistency semantic with respect to other
 * instructions for cmpxchg and add_return family of atomic primitives.
 *
 * This is achieved with:
 *   lwsync (prior loads can be reordered after following load)
 *   lwarx
 *   stwcx.
 *   test if success (retry)
 *   sync
 *
 * Explanation of the sequential consistency provided by this scheme
 * from Paul E. McKenney:
 *
 * The reason we can get away with the lwsync before is that if a prior
 * store reorders with the lwarx, then you have to store to the atomic
 * variable from some other CPU to detect it.
 *
 * And if you do that, the lwarx will lose its reservation, so the stwcx
 * will fail.  The atomic operation will then be retried, so that the
 * caller won't be able to see the misordering.
 */

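/*
 * For instance (an illustrative scenario, not part of the original
 * explanation; "data" and "flag" are hypothetical shared variables, both
 * initially zero):
 *
 *	CPU 0					CPU 1
 *	data = 42;				if (uatomic_xchg(&flag, 0) == 1)
 *	(void) uatomic_xchg(&flag, 1);			assert(data == 42);
 *
 * The lwsync before the reservation keeps CPU 0's store to data ordered
 * before its atomic store to flag, and the trailing sync keeps CPU 1's
 * read of data ordered after its atomic operation, so the assertion
 * cannot fail.
 */
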
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
73 "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
74 "stwcx. %2,0,%1\n" /* else store conditional */
75 "bne- 1b\n" /* retry if lost reservation */
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
90 "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
91 "stdcx. %2,0,%1\n" /* else store conditional */
92 "bne- 1b\n" /* retry if lost reservation */
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

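/*
 * Usage sketch for uatomic_xchg(): the macro returns the value previously
 * stored at addr, with full memory-barrier semantics. The function and
 * variable names below are hypothetical illustrations, not part of this
 * header's API. A common pattern is claiming one-shot ownership:
 */
static inline int example_try_take(int *taken)
{
	/* The first caller to swap in 1 observes the old value 0 and wins. */
	return uatomic_xchg(taken, 1) == 0;
}
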
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				    \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	    \
						sizeof(*(addr))))

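/*
 * Usage sketch for uatomic_cmpxchg(): the macro returns the value found at
 * addr; the store took place only if that value equals the expected old
 * value. The helper below is a hypothetical illustration (not part of this
 * header's API) of the usual compare-and-swap retry loop, incrementing a
 * counter only while it stays under a cap:
 */
static inline int example_inc_capped(unsigned long *counter, unsigned long max)
{
	unsigned long old, ret;

	ret = CMM_LOAD_SHARED(*counter);
	for (;;) {
		if (ret >= max)
			return 0;	/* cap reached, no update */
		old = ret;
		ret = uatomic_cmpxchg(counter, old, old + 1);
		if (ret == old)
			return 1;	/* our old + 1 was stored */
		/* lost a race: ret holds the fresh value, retry */
	}
}
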
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
			int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						(unsigned long)(v),	\
						sizeof(*(addr))))

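/*
 * Usage sketch for uatomic_add_return(): the macro returns the value after
 * the addition, so a caller can act on the result of its own update. The
 * function below is a hypothetical illustration (not part of this header's
 * API), e.g. for dropping a reference count:
 */
static inline int example_unref(long *refcount)
{
	/* The implied full barriers order prior accesses before the thread
	 * that observes the count reaching zero tears the object down. */
	return uatomic_add_return(refcount, -1) == 0;
}
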
#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_PPC_H */