git.lttng.org Git - urcu.git/commitdiff
Separate uatomic and uatomic_mo
author Olivier Dion <odion@efficios.com>
Mon, 21 Oct 2024 17:18:08 +0000 (13:18 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Mon, 2 Dec 2024 18:17:40 +0000 (13:17 -0500)
The API for uatomic is now defined under `urcu/uatomic/api.h', which is
included by `urcu/uatomic.h'.  All definitions are macros that dispatch
to their `_mo' counterparts.  The memory-order argument is optional and
defaults to each operation's historical ordering, for backward
compatibility.
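
For example (with the expansions defined in `urcu/uatomic/api.h' below):

    uatomic_add(&x, 1);              /* uatomic_add_mo(&x, 1, CMM_RELAXED) */
    uatomic_add(&x, 1, CMM_SEQ_CST); /* uatomic_add_mo(&x, 1, CMM_SEQ_CST) */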

This means that only the `uatomic_*_mo' operations need to be
implemented, either generically or per architecture.

This also removes the C11 compatibility layer on x86.  Since RMW
operations there are always guaranteed to act as a full fence, it is
safe to ignore the requested memory ordering: all of the `__sync'
operations used are documented as being "considered as a full barrier".
See https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html.
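
For instance, whatever ordering the caller requests, a call such as the
following (illustrative) already implies a full barrier:

    (void) __sync_add_and_fetch(&x, 1);  /* documented as a full barrier */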

Change-Id: I6be8c45b1758f268e7406bb17ab0086f9e9f5d4e
Signed-off-by: Olivier Dion <odion@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
include/Makefile.am
include/urcu/uatomic.h
include/urcu/uatomic/api.h [new file with mode: 0644]
include/urcu/uatomic/arm.h
include/urcu/uatomic/builtins-generic.h
include/urcu/uatomic/generic.h
include/urcu/uatomic/ppc.h
include/urcu/uatomic/s390.h
include/urcu/uatomic/sparc64.h
include/urcu/uatomic/x86.h

index 4c32a4cd7d0cf499273dcf86303cb1346de36af1..9bc93b45adf7f586890c00efff4331627bb6728e 100644 (file)
@@ -64,6 +64,7 @@ nobase_include_HEADERS = \
        urcu/tls-compat.h \
        urcu/uatomic/aarch64.h \
        urcu/uatomic/alpha.h \
+       urcu/uatomic/api.h \
        urcu/uatomic_arch.h \
        urcu/uatomic/arm.h \
        urcu/uatomic/builtins.h \
index 561c8294f3faf1d8afeedb12ea97bbbde9eacdad..69154d37a35d6003a283e058df66ecb04756ce44 100644 (file)
@@ -63,6 +63,8 @@ static inline int cmm_to_c11(int mo)
        return mo;
 }
 
+#include <urcu/uatomic/api.h>
+
 #if defined(CONFIG_RCU_USE_ATOMIC_BUILTINS)
 #include <urcu/uatomic/builtins.h>
 #elif defined(URCU_ARCH_X86)
diff --git a/include/urcu/uatomic/api.h b/include/urcu/uatomic/api.h
new file mode 100644 (file)
index 0000000..4b92464
--- /dev/null
@@ -0,0 +1,71 @@
+#ifndef _URCU_UATOMIC_API_H
+#define _URCU_UATOMIC_API_H
+
+/*
+ * Select second argument. Use inside macros to implement optional last macro
+ * argument, such as:
+ *
+ * #define macro(_a, _b, _c, _optional...) \
+ *     _uatomic_select_arg1(_, ##_optional, do_default_macro())
+ */
+#define _uatomic_select_arg1(arg0, arg1, ...) arg1
+
+/*
+ * Like _uatomic_select_arg1(), but selects the third argument; used for
+ * selecting a second optional argument.
+ */
+#define _uatomic_select_arg2(arg0, arg1, arg2, ...) arg2
+
+#define _uatomic_default_mo(dflt, mo...)       \
+       _uatomic_select_arg1(_, ##mo, dflt)
+
+#define _uatomic_default_mo2(dflt, mo...)      \
+       _uatomic_select_arg2(_, ##mo, dflt, dflt)
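+
+/*
+ * For example, `_uatomic_default_mo(CMM_RELAXED)' expands to CMM_RELAXED,
+ * and `_uatomic_default_mo(CMM_RELAXED, CMM_ACQUIRE)' expands to
+ * CMM_ACQUIRE.
+ */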
+
+#define uatomic_load(addr, mo...)              \
+       uatomic_load_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_read(addr, mo...)                                      \
+       uatomic_load_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_store(addr, value, mo...)                              \
+       uatomic_store_mo(addr, value, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_set(addr, value, mo...)                                        \
+       uatomic_store_mo(addr, value, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_add_return(addr, v, mo...)                             \
+       uatomic_add_return_mo(addr, v, _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_sub_return(addr, v, mo...)                             \
+       uatomic_sub_return_mo(addr, v, _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_and(addr, mask, mo...)                                 \
+       uatomic_and_mo(addr, mask, _uatomic_default_mo(CMM_SEQ_CST, ##mo))
+
+#define uatomic_or(addr, mask, mo...)                                          \
+       uatomic_or_mo(addr, mask, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_add(addr, v, mo...)                                            \
+       uatomic_add_mo(addr, v, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_sub(addr, v, mo...)                                            \
+       uatomic_sub_mo(addr, v, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_inc(addr, mo...)                                               \
+       uatomic_inc_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_dec(addr, mo...)                                               \
+       uatomic_dec_mo(addr, _uatomic_default_mo(CMM_RELAXED, ##mo))
+
+#define uatomic_xchg(addr, value, mo...)                               \
+       uatomic_xchg_mo(addr, value,                                    \
+                       _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo))
+
+#define uatomic_cmpxchg(addr, value, _new, mo...)                      \
+       uatomic_cmpxchg_mo(addr, value, _new,                           \
+                       _uatomic_default_mo(CMM_SEQ_CST_FENCE, ##mo),   \
+                       _uatomic_default_mo2(CMM_RELAXED, ##mo))
+
+
+#endif /* _URCU_UATOMIC_API_H */
index 5124a71aa9d4dfda80c41e04500c6706a60c0638..69233719b79b19e09069d4b2143d5c55404b254b 100644 (file)
@@ -25,6 +25,23 @@ extern "C" {
 #endif
 
 /* xchg */
+static inline void _cmm_compat_c11_smp_mb__before_xchg_mo(enum cmm_memorder mo)
+{
+       switch (mo) {
+       case CMM_SEQ_CST_FENCE:
+       case CMM_SEQ_CST:
+       case CMM_ACQ_REL:
+       case CMM_RELEASE:
+               cmm_smp_mb();
+               break;
+       case CMM_ACQUIRE:
+       case CMM_CONSUME:
+       case CMM_RELAXED:
+               break;
+       default:
+               abort();
+       }
+}
 
 /*
  * Based on [1], __sync_lock_test_and_set() is not a full barrier, but
@@ -34,10 +51,10 @@ extern "C" {
  *
  * [1] https://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
  */
-#define uatomic_xchg(addr, v)                          \
-       ({                                              \
-               cmm_smp_mb();                           \
-               __sync_lock_test_and_set(addr, v);      \
+#define uatomic_xchg_mo(addr, v, mo)                           \
+       ({                                                      \
+               _cmm_compat_c11_smp_mb__before_xchg_mo(mo);     \
+               __sync_lock_test_and_set(addr, v);              \
        })
 
 #ifdef __cplusplus
index 641ac53751bf52b88f31961bc1688e323af47514..ca11cf146cefb76a55dae689ebab4213bdd05ace 100644 (file)
 #include <urcu/compiler.h>
 #include <urcu/system.h>
 
-#define uatomic_store(addr, v, mo)                             \
+#define uatomic_store_mo(addr, v, mo)                          \
        do {                                                    \
                __atomic_store_n(cmm_cast_volatile(addr), v,    \
                                cmm_to_c11(mo));                \
                cmm_seq_cst_fence_after_atomic(mo);             \
        } while (0)
 
-#define uatomic_set(addr, v)                           \
-       do {                                            \
-               uatomic_store(addr, v, CMM_RELAXED);    \
-       } while (0)
-
-#define uatomic_load(addr, mo)                                         \
+#define uatomic_load_mo(addr, mo)                                      \
        __extension__                                                   \
        ({                                                              \
                __typeof__(*(addr)) _value =                            \
@@ -35,9 +30,6 @@
                _value;                                                 \
        })
 
-#define uatomic_read(addr)                     \
-       uatomic_load(addr, CMM_RELAXED)
-
 #define uatomic_cmpxchg_mo(addr, old, new, mos, mof)                   \
        __extension__                                                   \
        ({                                                              \
@@ -54,9 +46,6 @@
                _old;                                                   \
        })
 
-#define uatomic_cmpxchg(addr, old, new)                                        \
-       uatomic_cmpxchg_mo(addr, old, new, CMM_SEQ_CST_FENCE, CMM_RELAXED)
-
 #define uatomic_xchg_mo(addr, v, mo)                                   \
        __extension__                                                   \
        ({                                                              \
@@ -67,9 +56,6 @@
                _old;                                                   \
        })
 
-#define uatomic_xchg(addr, v)                          \
-       uatomic_xchg_mo(addr, v, CMM_SEQ_CST_FENCE)
-
 #define uatomic_add_return_mo(addr, v, mo)                             \
        __extension__                                                   \
        ({                                                              \
@@ -80,8 +66,6 @@
                _old;                                                   \
        })
 
-#define uatomic_add_return(addr, v)                            \
-       uatomic_add_return_mo(addr, v, CMM_SEQ_CST_FENCE)
 
 #define uatomic_sub_return_mo(addr, v, mo)                             \
        __extension__                                                   \
@@ -93,8 +77,6 @@
                _old;                                                   \
        })
 
-#define uatomic_sub_return(addr, v)                            \
-       uatomic_sub_return_mo(addr, v, CMM_SEQ_CST_FENCE)
 
 #define uatomic_and_mo(addr, mask, mo)                                 \
        do {                                                            \
                cmm_seq_cst_fence_after_atomic(mo);                     \
        } while (0)
 
-#define uatomic_and(addr, mask)                        \
-       uatomic_and_mo(addr, mask, CMM_SEQ_CST)
 
 #define uatomic_or_mo(addr, mask, mo)                                  \
        do {                                                            \
        } while (0)
 
 
-#define uatomic_or(addr, mask)                 \
-       uatomic_or_mo(addr, mask, CMM_RELAXED)
-
 #define uatomic_add_mo(addr, v, mo)                    \
        (void) uatomic_add_return_mo(addr, v, mo)
 
-#define uatomic_add(addr, v)                   \
-       uatomic_add_mo(addr, v, CMM_RELAXED)
-
 #define uatomic_sub_mo(addr, v, mo)                    \
        (void) uatomic_sub_return_mo(addr, v, mo)
 
-#define uatomic_sub(addr, v)                   \
-       uatomic_sub_mo(addr, v, CMM_RELAXED)
-
 #define uatomic_inc_mo(addr, mo)               \
        uatomic_add_mo(addr, 1, mo)
 
-#define uatomic_inc(addr)                      \
-       uatomic_inc_mo(addr, CMM_RELAXED)
-
 #define uatomic_dec_mo(addr, mo)               \
        uatomic_sub_mo(addr, 1, mo)
 
-#define uatomic_dec(addr)                      \
-       uatomic_dec_mo(addr, CMM_RELAXED)
-
 #define cmm_smp_mb__before_uatomic_and() cmm_smp_mb()
 #define cmm_smp_mb__after_uatomic_and()  cmm_smp_mb()
 
index ed655bb8def13a5120990d049eb12199ee47376a..24a025fc92d1285130efa55d5b994cec10eef362 100644 (file)
 extern "C" {
 #endif
 
-#ifndef uatomic_set
-#define uatomic_set(addr, v)   ((void) CMM_STORE_SHARED(*(addr), (v)))
-#endif
-
 /*
  * Can be defined for the architecture.
  *
  * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
  */
 #ifndef _cmm_compat_c11_smp_mb__before_mo
-# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
-#endif
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo)      \
+       do {                                                    \
+               switch (mo) {                                   \
+               case CMM_SEQ_CST_FENCE:                         \
+               case CMM_SEQ_CST:                               \
+               case CMM_ACQ_REL:                               \
+               case CMM_RELEASE:                               \
+                       cmm_smp_mb();                           \
+                       break;                                  \
+               case CMM_ACQUIRE:                               \
+               case CMM_CONSUME:                               \
+               case CMM_RELAXED:                               \
+                       break;                                  \
+               default:                                        \
+                       abort();                                \
+                       break;                                  \
+               }                                               \
+       } while (0)
+
+#endif /* _cmm_compat_c11_smp_mb__before_mo */
 
 /*
  * Can be defined for the architecture.
@@ -41,83 +56,54 @@ extern "C" {
  * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
  */
 #ifndef _cmm_compat_c11_smp_mb__after_mo
-# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
-#endif
-
-#define uatomic_load_store_return_op(op, addr, v, mo)          \
-       __extension__                                           \
-       ({                                                      \
-               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
-               __typeof__((*addr)) _value = op(addr, v);       \
-               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo)       \
+       do {                                                    \
+               switch (mo) {                                   \
+               case CMM_SEQ_CST_FENCE:                         \
+               case CMM_SEQ_CST:                               \
+               case CMM_ACQUIRE:                               \
+               case CMM_CONSUME:                               \
+               case CMM_ACQ_REL:                               \
+                       cmm_smp_mb();                           \
+                       break;                                  \
+               case CMM_RELEASE:                               \
+               case CMM_RELAXED:                               \
+                       break;                                  \
+               default:                                        \
+                       abort();                                \
+                       break;                                  \
                                                                \
-               _value;                                         \
-       })
+               }                                               \
+       } while (0)
+#endif /* _cmm_compat_c11_smp_mb__after_mo */
 
-#define uatomic_load_store_op(op, addr, v, mo)                 \
-       do {                                                    \
-               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
-               op(addr, v);                                    \
-               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
-       } while (0)
 
-#define uatomic_store(addr, v, mo)                                     \
+#define uatomic_store_mo(addr, v, mo)                                  \
        do {                                                            \
                _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo);     \
                uatomic_set(addr, v);                                   \
                _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo);      \
        } while (0)
 
-#define uatomic_and_mo(addr, v, mo)                    \
-       uatomic_load_store_op(uatomic_and, addr, v, mo)
-
-#define uatomic_or_mo(addr, v, mo)                     \
-       uatomic_load_store_op(uatomic_or, addr, v, mo)
-
-#define uatomic_add_mo(addr, v, mo)                    \
-       uatomic_load_store_op(uatomic_add, addr, v, mo)
-
-#define uatomic_sub_mo(addr, v, mo)                    \
-       uatomic_load_store_op(uatomic_sub, addr, v, mo)
-
-#define uatomic_inc_mo(addr, mo)                       \
-       uatomic_load_store_op(uatomic_add, addr, 1, mo)
-
-#define uatomic_dec_mo(addr, mo)                               \
-       uatomic_load_store_op(uatomic_add, addr, -1, mo)
 /*
- * NOTE: We can not just do switch (_value == (old) ? mos : mof) otherwise the
- * compiler emit a -Wduplicated-cond warning.
  */
-#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)                   \
        __extension__                                                   \
        ({                                                              \
-               _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
-               __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
-                                                       new);           \
+               __typeof__(*(addr)) _value =                            \
+                       __atomic_load_n(cmm_cast_volatile(addr),        \
+                                       cmm_to_c11(mo));                \
+               cmm_seq_cst_fence_after_atomic(mo);                     \
                                                                        \
-               if (_value == (old)) {                                  \
-                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
-               } else {                                                \
-                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
-               }                                                       \
                _value;                                                 \
        })
 
-#define uatomic_xchg_mo(addr, v, mo)                           \
-       uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)
-
-#define uatomic_add_return_mo(addr, v, mo)                             \
-       uatomic_load_store_return_op(uatomic_add_return, addr, v)
 
-#define uatomic_sub_return_mo(addr, v, mo)                             \
-       uatomic_load_store_return_op(uatomic_sub_return, addr, v)
-
-#ifndef uatomic_read
-#define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
-#endif
-
-#define uatomic_load(addr, mo)                                         \
+#define uatomic_load_mo(addr, mo)                                              \
        __extension__                                                   \
        ({                                                              \
                _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo);    \
@@ -146,13 +132,20 @@ void _uatomic_link_error(void)
 }
 #endif
 
+
+/*
+ * NOTE: All RMW operations are implemented using the `__sync' builtins.  All
+ * builtins used are documented as being considered a "full barrier".  Therefore,
+ * for RMW operations, nothing is emitted for any memory order.
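+ *
+ * For example, __sync_add_and_fetch() and __sync_val_compare_and_swap() are
+ * documented as full barriers.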
+ */
+
 #else /* #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
 extern void _uatomic_link_error(void);
 #endif /* #else #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
 
-/* cmpxchg */
+/* uatomic_cmpxchg_mo */
 
-#ifndef uatomic_cmpxchg
+#ifndef uatomic_cmpxchg_mo
 static inline __attribute__((always_inline))
 unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                              unsigned long _new, int len)
@@ -181,17 +174,14 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
        return 0;
 }
 
-
-#define uatomic_cmpxchg(addr, old, _new)                                     \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof)                  \
+       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                 \
                                                caa_cast_long_keep_sign(old), \
-                                               caa_cast_long_keep_sign(_new),\
+                                               caa_cast_long_keep_sign(_new), \
                                                sizeof(*(addr))))
+/* uatomic_and_mo */
 
-
-/* uatomic_and */
-
-#ifndef uatomic_and
+#ifndef uatomic_and_mo
 static inline __attribute__((always_inline))
 void _uatomic_and(void *addr, unsigned long val,
                  int len)
@@ -219,7 +209,7 @@ void _uatomic_and(void *addr, unsigned long val,
        _uatomic_link_error();
 }
 
-#define uatomic_and(addr, v)                   \
+#define uatomic_and_mo(addr, v, mo)            \
        (_uatomic_and((addr),                   \
                caa_cast_long_keep_sign(v),     \
                sizeof(*(addr))))
@@ -228,9 +218,9 @@ void _uatomic_and(void *addr, unsigned long val,
 
 #endif
 
-/* uatomic_or */
+/* uatomic_or_mo */
 
-#ifndef uatomic_or
+#ifndef uatomic_or_mo
 static inline __attribute__((always_inline))
 void _uatomic_or(void *addr, unsigned long val,
                 int len)
@@ -259,7 +249,7 @@ void _uatomic_or(void *addr, unsigned long val,
        return;
 }
 
-#define uatomic_or(addr, v)                    \
+#define uatomic_or_mo(addr, v, mo)             \
        (_uatomic_or((addr),                    \
                caa_cast_long_keep_sign(v),     \
                sizeof(*(addr))))
@@ -269,9 +259,9 @@ void _uatomic_or(void *addr, unsigned long val,
 #endif
 
 
-/* uatomic_add_return */
+/* uatomic_add_return_mo */
 
-#ifndef uatomic_add_return
+#ifndef uatomic_add_return_mo
 static inline __attribute__((always_inline))
 unsigned long _uatomic_add_return(void *addr, unsigned long val,
                                 int len)
@@ -297,13 +287,13 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 }
 
 
-#define uatomic_add_return(addr, v)                                        \
+#define uatomic_add_return_mo(addr, v, mo)                             \
        ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
 #endif /* #ifndef uatomic_add_return */
 
-#ifndef uatomic_xchg
+#ifndef uatomic_xchg_mo
 /* xchg */
 
 static inline __attribute__((always_inline))
@@ -365,16 +355,16 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define uatomic_xchg(addr, v)                                              \
+#define uatomic_xchg_mo(addr, v, mo)                                   \
        ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
+#endif /* #ifndef uatomic_xchg_mo */
 
-#else /* #ifndef uatomic_cmpxchg */
+#else /* #ifndef uatomic_cmpxchg_mo */
 
-#ifndef uatomic_and
-/* uatomic_and */
+#ifndef uatomic_and_mo
+/* uatomic_and_mo */
 
 static inline __attribute__((always_inline))
 void _uatomic_and(void *addr, unsigned long val, int len)
@@ -436,17 +426,17 @@ void _uatomic_and(void *addr, unsigned long val, int len)
        _uatomic_link_error();
 }
 
-#define uatomic_and(addr, v)                   \
+#define uatomic_and_mo(addr, v, mo)            \
        (_uatomic_and((addr),                   \
                caa_cast_long_keep_sign(v),     \
                sizeof(*(addr))))
 #define cmm_smp_mb__before_uatomic_and()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_and()                cmm_barrier()
 
-#endif /* #ifndef uatomic_and */
+#endif /* #ifndef uatomic_and_mo */
 
-#ifndef uatomic_or
-/* uatomic_or */
+#ifndef uatomic_or_mo
+/* uatomic_or_mo */
 
 static inline __attribute__((always_inline))
 void _uatomic_or(void *addr, unsigned long val, int len)
@@ -510,17 +500,17 @@ void _uatomic_or(void *addr, unsigned long val, int len)
        _uatomic_link_error();
 }
 
-#define uatomic_or(addr, v)                    \
+#define uatomic_or_mo(addr, v, mo)             \
        (_uatomic_or((addr),                    \
                caa_cast_long_keep_sign(v),     \
                sizeof(*(addr))))
 #define cmm_smp_mb__before_uatomic_or()                cmm_barrier()
 #define cmm_smp_mb__after_uatomic_or()         cmm_barrier()
 
-#endif /* #ifndef uatomic_or */
+#endif /* #ifndef uatomic_or_mo */
 
-#ifndef uatomic_add_return
-/* uatomic_add_return */
+#ifndef uatomic_add_return_mo
+/* uatomic_add_return_mo */
 
 static inline __attribute__((always_inline))
 unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
@@ -589,14 +579,14 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define uatomic_add_return(addr, v)                                        \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
+#define uatomic_add_return_mo(addr, v, mo)                             \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
+#endif /* #ifndef uatomic_add_return_mo */
 
-#ifndef uatomic_xchg
-/* xchg */
+#ifndef uatomic_xchg_mo
+/* uatomic_xchg_mo */
 
 static inline __attribute__((always_inline))
 unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
@@ -665,37 +655,37 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
+#define uatomic_xchg_mo(addr, v, mo)                                   \
+       ((__typeof__(*(addr))) _uatomic_exchange((addr),                \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
+#endif /* #ifndef uatomic_xchg_mo */
 
-#endif /* #else #ifndef uatomic_cmpxchg */
+#endif /* #else #ifndef uatomic_cmpxchg_mo */
 
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+/* uatomic_sub_return_mo, uatomic_add_mo, uatomic_sub_mo, uatomic_inc_mo, uatomic_dec_mo */
 
-#ifndef uatomic_add
-#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
+#ifndef uatomic_add_mo
+#define uatomic_add_mo(addr, v, mo)            (void)uatomic_add_return_mo((addr), (v), mo)
 #define cmm_smp_mb__before_uatomic_add()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_add()                cmm_barrier()
 #endif
 
-#define uatomic_sub_return(addr, v)    \
-       uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
-#define uatomic_sub(addr, v)           \
-       uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
+#define uatomic_sub_return_mo(addr, v, mo)                             \
+       uatomic_add_return_mo((addr), -(caa_cast_long_keep_sign(v)), mo)
+#define uatomic_sub_mo(addr, v, mo)                                    \
+       uatomic_add_mo((addr), -(caa_cast_long_keep_sign(v)), mo)
 #define cmm_smp_mb__before_uatomic_sub()       cmm_smp_mb__before_uatomic_add()
 #define cmm_smp_mb__after_uatomic_sub()                cmm_smp_mb__after_uatomic_add()
 
-#ifndef uatomic_inc
-#define uatomic_inc(addr)              uatomic_add((addr), 1)
+#ifndef uatomic_inc_mo
+#define uatomic_inc_mo(addr, mo)               uatomic_add_mo((addr), 1, mo)
 #define cmm_smp_mb__before_uatomic_inc()       cmm_smp_mb__before_uatomic_add()
 #define cmm_smp_mb__after_uatomic_inc()                cmm_smp_mb__after_uatomic_add()
 #endif
 
-#ifndef uatomic_dec
-#define uatomic_dec(addr)              uatomic_add((addr), -1)
+#ifndef uatomic_dec_mo
+#define uatomic_dec_mo(addr, mo)               uatomic_add_mo((addr), -1, mo)
 #define cmm_smp_mb__before_uatomic_dec()       cmm_smp_mb__before_uatomic_add()
 #define cmm_smp_mb__after_uatomic_dec()                cmm_smp_mb__after_uatomic_add()
 #endif
index dc59518c8e0a7472560d5fe5fc4e55b8389bf1ee..1f9a292dd7219152b75fbf6bcf4fc19fe5a98740 100644 (file)
@@ -94,7 +94,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define uatomic_xchg(addr, v)                                              \
+#define uatomic_xchg_mo(addr, v, mo)                                   \
        ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
@@ -157,7 +157,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 }
 
 
-#define uatomic_cmpxchg(addr, old, _new)                                     \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof)                  \
        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
                                                caa_cast_long_keep_sign(old), \
                                                caa_cast_long_keep_sign(_new),\
@@ -216,7 +216,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 }
 
 
-#define uatomic_add_return(addr, v)                                        \
+#define uatomic_add_return_mo(addr, v, mo)                             \
        ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
index 25626961dc320a01c25a2b56f4f5e0928cc3a4e2..40ad5697b20107ab4bd4c88263750f7fea59ef93 100644 (file)
@@ -99,7 +99,7 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define uatomic_xchg(addr, v)                                              \
+#define uatomic_xchg_mo(addr, v, mo)                                   \
        (__typeof__(*(addr))) _uatomic_exchange((addr),                     \
                                                caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr)))
@@ -140,7 +140,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
        return 0;
 }
 
-#define uatomic_cmpxchg(addr, old, _new)                                    \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof)                  \
        (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
                                               caa_cast_long_keep_sign(old), \
                                               caa_cast_long_keep_sign(_new),\
index 97ecb466d083b21dad6f56420d6b3bf48732a085..c6baeaccf94aa7949246d083fb861de5f3e0b7d2 100644 (file)
@@ -59,7 +59,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 }
 
 
-#define uatomic_cmpxchg(addr, old, _new)                                      \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof)                  \
        ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                        \
                                                caa_cast_long_keep_sign(old),  \
                                                caa_cast_long_keep_sign(_new), \
index 616eee9be3ca60b4be62c8a10cadb0aff2d1ea8d..5afb4472d544ee372f145ae9630f8dad5da4812d 100644 (file)
@@ -47,8 +47,6 @@ typedef struct { char v[8]; } __hp_8;
 
 #define __hp(size, x)  ((__hp_##size *)(x))
 
-#define _uatomic_set(addr, v)  ((void) CMM_STORE_SHARED(*(addr), (v)))
-
 /* cmpxchg */
 
 static inline __attribute__((always_inline))
@@ -598,41 +596,45 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
 #define UATOMIC_COMPAT(insn)   (_uatomic_##insn)
 #endif
 
-/* Read is atomic even in compat mode */
-#define uatomic_set(addr, v)                   \
-               UATOMIC_COMPAT(set(addr, v))
+/*
+ * All RMW operations have an implicit lock prefix.  Memory ordering is
+ * therefore ignored for these operations: every requested ordering is
+ * already respected without emitting any memory barrier.
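+ *
+ * For example, `xchg' is implicitly locked, and the other RMW instructions
+ * (e.g. `lock; cmpxchg') carry an explicit lock prefix; all of these behave
+ * as full barriers on x86.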
+ */
 
-#define uatomic_cmpxchg(addr, old, _new)       \
+#define uatomic_cmpxchg_mo(addr, old, _new, mos, mof)          \
                UATOMIC_COMPAT(cmpxchg(addr, old, _new))
-#define uatomic_xchg(addr, v)                  \
+
+#define uatomic_xchg_mo(addr, v, mo)           \
                UATOMIC_COMPAT(xchg(addr, v))
 
-#define uatomic_and(addr, v)           \
+#define uatomic_and_mo(addr, v, mo)            \
                UATOMIC_COMPAT(and(addr, v))
 #define cmm_smp_mb__before_uatomic_and()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_and()                cmm_barrier()
 
-#define uatomic_or(addr, v)            \
+#define uatomic_or_mo(addr, v, mo)             \
                UATOMIC_COMPAT(or(addr, v))
 #define cmm_smp_mb__before_uatomic_or()                cmm_barrier()
 #define cmm_smp_mb__after_uatomic_or()         cmm_barrier()
 
-#define uatomic_add_return(addr, v)            \
+#define uatomic_add_return_mo(addr, v, mo)             \
                UATOMIC_COMPAT(add_return(addr, v))
 
-#define uatomic_add(addr, v)   UATOMIC_COMPAT(add(addr, v))
+#define uatomic_add_mo(addr, v, mo)    UATOMIC_COMPAT(add(addr, v))
 #define cmm_smp_mb__before_uatomic_add()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_add()                cmm_barrier()
 
-#define uatomic_inc(addr)      UATOMIC_COMPAT(inc(addr))
+#define uatomic_inc_mo(addr, mo)       UATOMIC_COMPAT(inc(addr))
 #define cmm_smp_mb__before_uatomic_inc()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_inc()                cmm_barrier()
 
-#define uatomic_dec(addr)      UATOMIC_COMPAT(dec(addr))
+#define uatomic_dec_mo(addr, mo)       UATOMIC_COMPAT(dec(addr))
 #define cmm_smp_mb__before_uatomic_dec()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_dec()                cmm_barrier()
 
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_load_mo(enum cmm_memorder mo)
 {
        /*
         * A SMP barrier is not necessary for CMM_SEQ_CST because, only a
@@ -660,7 +662,7 @@ static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memor
        }
 }
 
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_load_mo(enum cmm_memorder mo)
 {
        /*
         * A SMP barrier is not necessary for CMM_SEQ_CST because following
@@ -749,354 +751,14 @@ static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorde
        }
 }
 
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_xchg has implicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_xchg has implicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_cmpxchg has implicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_cmpxchg has implicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_and has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_and has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_or has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_or has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_add has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_add has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_sub has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_sub has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_inc has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_inc has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_dec has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_dec has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_add_return has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_add_return has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_sub_return has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
-{
-       /* NOP. uatomic_sub_return has explicit lock prefix. */
-       switch (mo) {
-       case CMM_RELAXED:       /* Fall-through */
-       case CMM_ACQUIRE:       /* Fall-through */
-       case CMM_CONSUME:       /* Fall-through */
-       case CMM_RELEASE:       /* Fall-through */
-       case CMM_ACQ_REL:       /* Fall-through */
-       case CMM_SEQ_CST:       /* Fall-through */
-       case CMM_SEQ_CST_FENCE:
-               break;
-       default:
-               abort();
-       }
-}
-
-#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)                       \
-       do {                                                    \
-               _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo);       \
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)               \
+       do {                                                            \
+               _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \
        } while (0)
 
 #define _cmm_compat_c11_smp_mb__after_mo(operation, mo)                        \
-       do {                                                    \
-               _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo);        \
+       do {                                                            \
+               _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \
        } while (0)
 
 