Support sparcv9 32-bit build
[urcu.git] / urcu / uatomic_arch_sparc64.h
CommitLineData
58de5a4b
MD
1#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
2#define _URCU_ARCH_UATOMIC_SPARC64_H
3
4/*
5 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
6 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
7 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
8 * Copyright (c) 2009 Mathieu Desnoyers
9 *
10 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
11 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
12 *
13 * Permission is hereby granted to use or copy this program
14 * for any purpose, provided the above notices are retained on all copies.
15 * Permission to modify the code and to distribute modified code is granted,
16 * provided the above notices are retained, and a notice that the code was
17 * modified is included with the above copyright notice.
18 *
19 * Code inspired from libuatomic_ops-1.2, inherited in part from the
20 * Boehm-Demers-Weiser conservative garbage collector.
21 */
22
23#include <urcu/compiler.h>
24#include <urcu/system.h>
25
/*
 * Fallback for compilers that do not predefine __SIZEOF_LONG__.
 * __LP64__ distinguishes the 64-bit ABI (8-byte long) from the
 * 32-bit sparcv9 build (4-byte long).
 */
#ifndef __SIZEOF_LONG__
#ifdef __LP64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

/* Width of a long in bits; selects the 8-byte cases below. */
#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif
37
/*
 * Plain store/load of *addr through the STORE_SHARED/LOAD_SHARED
 * accessors from urcu/system.h (see that header for the exact
 * visibility/ordering guarantees they provide).
 */
#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))
40
41/* cmpxchg */
42
/*
 * Atomic compare-and-swap for a 4-byte (or, on 64-bit builds, 8-byte)
 * operand at addr: if *addr == old, store _new into *addr.  Returns
 * the value previously found at *addr (equal to old iff the swap
 * happened).  Any other operand length traps at runtime.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		/*
		 * cas [%1],%2,%0: compare the 32-bit word at addr with
		 * old; if equal, swap it with _new.  Either way %0
		 * (_new, "+&r": read-write, early-clobber) receives the
		 * prior memory value.  The surrounding membars order the
		 * CAS against earlier loads and later stores.
		 */
		__asm__ __volatile__ (
			"membar #StoreLoad | #LoadLoad\n\t"
			"cas [%1],%2,%0\n\t"
			"membar #StoreLoad | #StoreStore\n\t"
			: "+&r" (_new)
			: "r" (addr), "r" (old)
			: "memory");

		return _new;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		/* Same pattern with casx for a 64-bit operand. */
		__asm__ __volatile__ (
			"membar #StoreLoad | #LoadLoad\n\t"
			"casx [%1],%2,%0\n\t"
			"membar #StoreLoad | #StoreStore\n\t"
			: "+&r" (_new)
			: "r" (addr), "r" (old)
			: "memory");

		return _new;
	}
#endif
	}
	/* Unsupported operand size. */
	__builtin_trap();
	return 0;
}
78
79
/*
 * Type-generic cmpxchg: the operand size is taken from sizeof(*(addr))
 * and the returned old value is cast back to the pointed-to type.
 */
#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))
84
85/* xchg */
86
/*
 * Atomically replace the 4-byte (or, on 64-bit builds, 8-byte) value
 * at addr with val, returning the value it held beforehand.  Built as
 * a CAS retry loop on top of _uatomic_cmpxchg; other lengths trap.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int expected, seen;

		seen = uatomic_read((unsigned int *)addr);
		for (;;) {
			/* Retry until no other CPU raced us between
			 * the read and the CAS. */
			expected = seen;
			seen = _uatomic_cmpxchg(addr, expected, val, 4);
			if (seen == expected)
				return expected;
		}
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long expected, seen;

		seen = uatomic_read((unsigned long *)addr);
		for (;;) {
			expected = seen;
			seen = _uatomic_cmpxchg(addr, expected, val, 8);
			if (seen == expected)
				return expected;
		}
	}
#endif
	}
	/* Unsupported operand size. */
	__builtin_trap();
	return 0;
}
121
/*
 * Type-generic exchange: stores v into *addr and returns the previous
 * value, cast back to the pointed-to type.
 */
#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
125
126/* uatomic_add_return */
127
/*
 * Atomically add val to the 4-byte (or, on 64-bit builds, 8-byte)
 * value at addr and return the NEW value (old + val).  Built as a CAS
 * retry loop on top of _uatomic_cmpxchg; other lengths trap.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int prev, cur;

		cur = uatomic_read((unsigned int *)addr);
		for (;;) {
			/* Retry until no other CPU raced us between
			 * the read and the CAS. */
			prev = cur;
			cur = _uatomic_cmpxchg(addr, prev, prev + val, 4);
			if (cur == prev)
				return prev + val;
		}
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long prev, cur;

		cur = uatomic_read((unsigned long *)addr);
		for (;;) {
			prev = cur;
			cur = _uatomic_cmpxchg(addr, prev, prev + val, 8);
			if (cur == prev)
				return prev + val;
		}
	}
#endif
	}
	/* Unsupported operand size. */
	__builtin_trap();
	return 0;
}
163
/*
 * Type-generic add-and-return: adds v to *addr and returns the new
 * value, cast back to the pointed-to type.
 */
#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						  (unsigned long)(v),	\
						  sizeof(*(addr))))
168
/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

/* Subtraction is addition of the negated value. */
#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

/* Variants that discard the resulting value. */
#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)

/*
 * The native cmpxchg above is always usable on this architecture, so
 * the compat entry point simply forwards to it.
 */
#define URCU_CAS_AVAIL()	1
#define compat_uatomic_cmpxchg(ptr, old, _new)	uatomic_cmpxchg(ptr, old, _new)
181
#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
This page took 0.051721 seconds and 4 git commands to generate.