// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

/*
 * Provide sequential consistency semantics with respect to other
 * instructions for the cmpxchg and add_return families of atomic
 * primitives.
 *
 * This is achieved with:
 *	lwsync (prior stores can be reordered after following loads)
 *	lwarx
 *	stwcx.
 *	test if success (retry)
 *	sync
 *
 * Explanation of the sequential consistency provided by this scheme,
 * from Paul E. McKenney:
 *
 * The reason we can get away with the lwsync before is that if a prior
 * store reorders with the lwarx, then you have to store to the atomic
 * variable from some other CPU to detect it.
 *
 * And if you do that, the lwarx will lose its reservation, so the stwcx.
 * will fail. The atomic operation will then retry, so the caller won't be
 * able to see the misordering.
 */

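/*
 * Illustrative two-CPU scenario (editor's sketch, not from the original
 * source; the interleaving shown is hypothetical):
 *
 *	CPU 0				CPU 1
 *	store X = 1
 *	lwsync
 *	lwarx  from A (reserve A)	store A = 2 (kills the reservation)
 *	stwcx. to A -> fails		load X
 *	retry: lwarx/stwcx. succeed
 *
 * The only way CPU 1 could observe CPU 0's store to X reordered after
 * the lwarx is by storing to the atomic variable A itself; that store
 * cancels CPU 0's reservation, the stwcx. fails, and the operation
 * retries, so no successful atomic operation exposes the reordering.
 */
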
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)					\
	((__typeof__(*(addr))) _uatomic_exchange((addr),	\
			caa_cast_long_keep_sign(v),		\
			sizeof(*(addr))))
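
/*
 * Illustrative usage (editor's sketch; "flag" is a hypothetical
 * variable, not part of this header). uatomic_xchg() atomically stores
 * the new value and returns the value previously in memory:
 *
 *	static unsigned long flag;
 *
 *	if (uatomic_xchg(&flag, 1UL) == 0)
 *		... we were the first to set the flag ...
 */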

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
		unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned int)_new),
				  "r"((unsigned int)old)
				: "memory", "cc");

		return old_val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
		"2:\n"
				: "=&r"(old_val)
				: "r"(addr), "r"((unsigned long)_new),
				  "r"((unsigned long)old)
				: "memory", "cc");

		return old_val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)			\
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),		\
			caa_cast_long_keep_sign(old),		\
			caa_cast_long_keep_sign(_new),		\
			sizeof(*(addr))))
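
/*
 * Illustrative usage (editor's sketch; "count" is a hypothetical
 * variable). uatomic_cmpxchg() returns the value that was in memory
 * before the operation; the update succeeded iff that value equals the
 * expected old value, which yields the classic compare-and-swap loop:
 *
 *	static unsigned long count;
 *
 *	unsigned long old, ret;
 *
 *	do {
 *		old = CMM_LOAD_SHARED(count);
 *		ret = uatomic_cmpxchg(&count, old, old + 1);
 *	} while (ret != old);
 */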

/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
		int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"sync\n"
				: "=&r"(result)
				: "r"(addr), "r"(val)
				: "memory", "cc");

		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_add_return(addr, v)				\
	((__typeof__(*(addr))) _uatomic_add_return((addr),	\
			caa_cast_long_keep_sign(v),		\
			sizeof(*(addr))))
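
/*
 * Illustrative usage (editor's sketch; "refcount" is a hypothetical
 * variable). uatomic_add_return() returns the new value after the
 * addition, so it can detect the final drop of a reference count:
 *
 *	static long refcount = 1;
 *
 *	if (uatomic_add_return(&refcount, -1) == 0)
 *		... last reference dropped, safe to reclaim ...
 */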

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic/generic.h>

#endif /* _URCU_ARCH_UATOMIC_PPC_H */