#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
+/*
+ * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * containing an array of char of the specified size. This allows passing the
+ * @addr arguments of the following inline functions as "m" and "+m" operands
+ * to the assembly.
+ */
+
+#define __hp(size, x) ((struct { char v[size]; } *)(x))
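/*
 * Illustration, not part of this patch: the cast hands the compiler an
 * lvalue spanning exactly `size` bytes, rather than the removed
 * 10-longword dummy struct, so the "m"/"+m" constraints now describe
 * the true width of the accessed memory. A minimal sketch, assuming
 * GCC or Clang (the helper name is hypothetical):
 */
static inline void __hp_width_demo(void)
{
	unsigned int word = 0;

	/* The lvalue handed to the asm spans exactly 4 bytes here. */
	_Static_assert(sizeof(*__hp(4, &word)) == 4, "4-byte operand");
	(void) word;
}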
/* xchg */
__asm__ __volatile__(
"0: cs %0,%2," MEMOP_REF(%3) "\n"
" brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
+ : "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
+ : "r" (val), MEMOP_IN (__hp(len, addr))
: "memory", "cc");
return old_val;
}
__asm__ __volatile__(
"0: csg %0,%2," MEMOP_REF(%3) "\n"
" brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
+ : "=&r" (old_val), MEMOP_OUT (__hp(len, addr))
+ : "r" (val), MEMOP_IN (__hp(len, addr))
: "memory", "cc");
return old_val;
}
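/*
 * Note, not part of this patch: cs/csg compare %0 with the memory
 * operand; on mismatch they load the current memory value into %0 and
 * set condition code 1, and "brc 4,0b" branches back on CC 1, so the
 * loop retries with the freshly observed value. Rough C equivalent of
 * the 4-byte exchange loop (a sketch using the GCC builtin, not
 * liburcu internals):
 */
static inline unsigned int s390_xchg_sketch(unsigned int *addr, unsigned int val)
{
	unsigned int old_val = *addr;

	/* On failure the builtin refreshes old_val, mirroring cs. */
	while (!__atomic_compare_exchange_n(addr, &old_val, val, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;
	return old_val;
}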
__asm__ __volatile__(
" cs %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
+ : "+r" (old_val), MEMOP_OUT (__hp(len, addr))
+ : "r" (_new), MEMOP_IN (__hp(len, addr))
: "memory", "cc");
return old_val;
}
{
__asm__ __volatile__(
" csg %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
+ : "+r" (old), MEMOP_OUT (__hp(len, addr))
+ : "r" (_new), MEMOP_IN (__hp(len, addr))
: "memory", "cc");
return old;
}
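/*
 * Note, not part of this patch: without the backward branch, cs/csg
 * above perform a single-shot compare-and-swap, and the caller receives
 * the value actually witnessed in memory. Hypothetical usage through
 * the public wrapper:
 *
 *	unsigned long seen = uatomic_cmpxchg(&owner, 0UL, self);
 *	if (seen == 0UL) {
 *		/* we installed `self`; otherwise seen holds the owner */
 *	}
 */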
* Derived from AO_compare_and_swap() and AO_test_and_set_full().
*/
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
+/*
+ * The __hp() macro casts the void pointer "x" to a pointer to a structure
+ * containing an array of char of the specified size. This allows passing the
+ * @addr arguments of the following inline functions as "m" and "+m" operands
+ * to the assembly.
+ */
+
+#define __hp(size, x) ((struct { char v[size]; } *)(x))
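/*
 * Illustration, not part of this patch: the hunks below live in helpers
 * that receive the access width as `len` and dispatch on it, which is
 * why each cast can pass `len` straight through to __hp(). A minimal,
 * self-contained sketch of the 4-byte case (the helper name is
 * hypothetical; the real helpers also cover 1, 2 and 8 bytes):
 */
static inline unsigned long __hp_cmpxchg_sketch(void *addr, unsigned long old,
						unsigned long _new, int len)
{
	if (len == 4) {
		unsigned int result = (unsigned int) old;

		__asm__ __volatile__(
			"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(4, addr))
			: "r"((unsigned int) _new)
			: "memory");
		return result;
	}
	return 0;	/* other widths elided from this sketch */
}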
#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
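/*
 * Note, not part of this patch: aligned x86 stores up to the machine
 * word size are atomic, so _uatomic_set() needs no lock prefix;
 * CMM_STORE_SHARED() is essentially a volatile store on cache-coherent
 * architectures.
 */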
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(len, addr))
: "q"((unsigned char)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(len, addr))
: "r"((unsigned short)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(len, addr))
: "r"((unsigned int)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(len, addr))
: "r"((unsigned long)_new)
: "memory");
return result;
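/*
 * Note, not part of this patch: cmpxchg compares the accumulator
 * ("a": al/ax/eax/rax) with the memory operand and, on mismatch, loads
 * the current memory value into the accumulator; the "+a"(result)
 * constraint is what hands the witnessed value back to the caller.
 */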
unsigned char result;
__asm__ __volatile__(
"xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(addr))
+ : "=q"(result), "+m"(*__hp(len, addr))
: "0" ((unsigned char)val)
: "memory");
return result;
unsigned short result;
__asm__ __volatile__(
"xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
+ : "=r"(result), "+m"(*__hp(len, addr))
: "0" ((unsigned short)val)
: "memory");
return result;
unsigned int result;
__asm__ __volatile__(
"xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
+ : "=r"(result), "+m"(*__hp(len, addr))
: "0" ((unsigned int)val)
: "memory");
return result;
unsigned long result;
__asm__ __volatile__(
"xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
+ : "=r"(result), "+m"(*__hp(len, addr))
: "0" ((unsigned long)val)
: "memory");
return result;
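/*
 * Note, not part of this patch: the xchg hunks above need no "lock;"
 * prefix, because x86 xchg with a memory operand is implicitly locked.
 */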
__asm__ __volatile__(
"lock; xaddb %1, %0"
- : "+m"(*__hp(addr)), "+q" (result)
+ : "+m"(*__hp(len, addr)), "+q" (result)
:
: "memory");
return result + (unsigned char)val;
__asm__ __volatile__(
"lock; xaddw %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
+ : "+m"(*__hp(len, addr)), "+r" (result)
:
: "memory");
return result + (unsigned short)val;
__asm__ __volatile__(
"lock; xaddl %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
+ : "+m"(*__hp(len, addr)), "+r" (result)
:
: "memory");
return result + (unsigned int)val;
__asm__ __volatile__(
"lock; xaddq %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
+ : "+m"(*__hp(len, addr)), "+r" (result)
:
: "memory");
return result + (unsigned long)val;
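/*
 * Note, not part of this patch: xadd leaves the previous memory value
 * in %1, so the helpers above return `result + val`, the post-add
 * value. Hypothetical usage through the public wrapper:
 *
 *	unsigned long n = uatomic_add_return(&counter, 1);
 */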
{
__asm__ __volatile__(
"lock; andb %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andw %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andl %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andq %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orb %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orw %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orl %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orq %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addb %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addw %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addl %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addq %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; incb %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incw %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incl %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incq %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decb %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decw %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decl %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decq %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(len, addr))
:
: "memory");
return;
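/*
 * Note, not part of this patch: the and/or/add/inc/dec helpers above
 * return no value; they back the public void wrappers, e.g.:
 *
 *	uatomic_inc(&counter);
 *	uatomic_add(&counter, 42);
 *	uatomic_and(&flags, ~0x04UL);
 */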