/* source blob id (from web export): 614867f1d61eb055daebb17bcce0cc7b14dbb023 */
#ifndef _URCU_UATOMIC_ARCH_S390_H
#define _URCU_UATOMIC_ARCH_S390_H
/*
 * Atomic exchange operations for the S390 architecture. Based on information
 * taken from the Principles of Operation Appendix A "Conditional Swapping
 * Instructions (CS, CDS)".
 *
 * Copyright (c) 2009 Novell, Inc.
 * Author: Jan Blunck <jblunck@suse.de>
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <urcu/compiler.h>
#include <urcu/system.h>
/*
 * Fallback for old compilers that do not predefine __SIZEOF_LONG__:
 * select the long size from the target ABI (s390x is 64-bit, s390 is
 * 31/32-bit). The damaged source had both defines without the selecting
 * conditional; restored here.
 */
#ifndef __SIZEOF_LONG__
#ifdef __s390x__
#define __SIZEOF_LONG__	8
#else
#define __SIZEOF_LONG__	4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif
/*
 * GCC > 3.2 supports the "Q" machine constraint (short displacement
 * memory operand on s390), which lets the asm reference memory directly.
 * The missing #endif closing this conditional is restored here.
 */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define COMPILER_HAVE_SHORT_MEM_OPERAND
#endif

/*
 * MEMOP assembler operand rules:
 * - op refer to MEMOP_IN operand
 * - MEMOP_IN can expand to more than a single operand. Use it at the end of
 *   operand list only.
 */

#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND

#define MEMOP_OUT(addr)	"=Q" (*(addr))
#define MEMOP_IN(addr)	"Q" (*(addr))
#define MEMOP_REF(op)	#op		/* op refer to MEMOP_IN operand */

#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */

/* Fallback: pass the address in a register ("a") and build an explicit
 * base+displacement reference "0(%reg)" in the instruction text. */
#define MEMOP_OUT(addr)	"=m" (*(addr))
#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
#define MEMOP_REF(op)	"0(" #op ")"	/* op refer to MEMOP_IN operand */

#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
/*
 * Dummy structure used as the object type for the asm memory operands:
 * casting the target address to struct __uatomic_dummy * tells the
 * compiler that the asm may touch the whole (large enough) object, so
 * it cannot cache the value across the asm statement.
 * NOTE(review): the array size was lost in the damaged source; 10
 * unsigned longs matches the upstream liburcu definition — confirm.
 */
struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
/*
 * Plain set/read of an atomic variable, implemented with the urcu
 * STORE_SHARED/LOAD_SHARED primitives from <urcu/system.h>
 * (presumably volatile-qualified accesses — confirm against that header).
 * Aligned stores/loads are atomic on s390, so no instruction-level
 * synchronization is needed here.
 */
#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))
86 static inline __attribute__((always_inline
))
87 unsigned long _uatomic_exchange(volatile void *addr
, unsigned long val
, int len
)
95 "0: cs %0,%2," MEMOP_REF(%3) "\n"
97 : "=&r" (old_val
), MEMOP_OUT (__hp(addr
))
98 : "r" (val
), MEMOP_IN (__hp(addr
))
102 #if (BITS_PER_LONG == 64)
105 unsigned long old_val
;
107 __asm__
__volatile__(
108 "0: csg %0,%2," MEMOP_REF(%3) "\n"
110 : "=&r" (old_val
), MEMOP_OUT (__hp(addr
))
111 : "r" (val
), MEMOP_IN (__hp(addr
))
117 __asm__
__volatile__(".long 0xd00d00");
/* Type-generic exchange: dispatch on the operand size of *addr. The
 * trailing sizeof argument was lost in the damaged source; restored. */
#define uatomic_xchg(addr, v)						    \
	(__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr)))
129 static inline __attribute__((always_inline
))
130 unsigned long _uatomic_cmpxchg(void *addr
, unsigned long old
,
131 unsigned long _new
, int len
)
136 unsigned int old_val
= (unsigned int)old
;
138 __asm__
__volatile__(
139 " cs %0,%2," MEMOP_REF(%3) "\n"
140 : "+r" (old_val
), MEMOP_OUT (__hp(addr
))
141 : "r" (_new
), MEMOP_IN (__hp(addr
))
145 #if (BITS_PER_LONG == 64)
148 __asm__
__volatile__(
149 " csg %0,%2," MEMOP_REF(%3) "\n"
150 : "+r" (old
), MEMOP_OUT (__hp(addr
))
151 : "r" (_new
), MEMOP_IN (__hp(addr
))
157 __asm__
__volatile__(".long 0xd00d00");
/* Type-generic compare-and-swap: dispatch on the operand size of *addr.
 * The trailing sizeof argument was lost in the damaged source; restored. */
#define uatomic_cmpxchg(addr, old, _new)				\
	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
					       (unsigned long)(old),	\
					       (unsigned long)(_new),	\
					       sizeof(*(addr)))
/* uatomic_add_return */

/*
 * Atomically add val to the value at addr and return the new value
 * (old + val). Implemented as a cmpxchg retry loop on top of
 * _uatomic_cmpxchg. len selects the operand size in bytes.
 */
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
		} while (oldt != old);

		return old + val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
		} while (oldt != old);

		return old + val;
	}
#endif
	default:
		/* Unsupported operand size: trap with an illegal opcode.
		 * NOTE(review): this default arm was missing from the damaged
		 * source; reconstructed for consistency with
		 * _uatomic_exchange/_uatomic_cmpxchg — confirm upstream. */
		__asm__ __volatile__(".long	0xd00d00");
	}

	return 0;
}
/* Type-generic add-and-return: dispatch on the operand size of *addr.
 * The trailing sizeof argument was lost in the damaged source; restored. */
#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						  (unsigned long)(v),	\
						  sizeof(*(addr))))
/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

/* Subtraction is addition of the (two's complement) negated value. */
#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

/* Fire-and-forget variants: discard the returned value. */
#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)
#endif /* _URCU_UATOMIC_ARCH_S390_H */
/* (gitweb page footer removed — not part of the source file) */