extern "C" {
#endif
-#define cmm_mb() asm volatile("mb":::"memory")
-#define cmm_wmb() asm volatile("wmb":::"memory")
-#define cmm_read_barrier_depends() asm volatile("mb":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("mb":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("wmb":::"memory")
+#define cmm_read_barrier_depends() __asm__ __volatile__ ("mb":::"memory")
typedef unsigned long long cycles_t;
#endif
#ifdef CONFIG_RCU_ARM_HAVE_DMB
-#define cmm_mb() asm volatile("dmb":::"memory")
-#define cmm_rmb() asm volatile("dmb":::"memory")
-#define cmm_wmb() asm volatile("dmb":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("dmb":::"memory")
+#define cmm_rmb() __asm__ __volatile__ ("dmb":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("dmb":::"memory")
#endif /* CONFIG_RCU_ARM_HAVE_DMB */
#include <stdlib.h>
* order cacheable and non-cacheable memory operations separately---i.e.
* not the latter against the former.
*/
-#define cmm_mb() asm volatile("sync":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("sync":::"memory")
/*
* lwsync orders loads in cacheable memory with respect to other loads,
* and stores in cacheable memory with respect to other stores.
* Therefore, use it for barriers ordering accesses to cacheable memory
* only.
*/
-#define cmm_smp_rmb() asm volatile(LWSYNC_OPCODE:::"memory")
-#define cmm_smp_wmb() asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_rmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
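/*
 * Illustrative sketch, not part of this patch: when both sides only touch
 * cacheable memory, the lwsync-based barriers pair as follows ("data",
 * "ready" and use() are made-up names):
 *
 *	writer:	data = 42;
 *		cmm_smp_wmb();	-- store to data ordered before store to ready
 *		ready = 1;
 *
 *	reader:	if (ready) {
 *			cmm_smp_rmb();	-- load of ready ordered before load of data
 *			use(data);
 *		}
 *
 * Accesses to non-cacheable (e.g. MMIO) memory need cmm_mb() instead,
 * which maps to the full "sync" above.
 */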
#define mftbl() \
({ \
unsigned long rval; \
- asm volatile("mftbl %0" : "=r" (rval)); \
+ __asm__ __volatile__ ("mftbl %0" : "=r" (rval)); \
rval; \
})
#define mftbu() \
({ \
unsigned long rval; \
- asm volatile("mftbu %0" : "=r" (rval)); \
+ __asm__ __volatile__ ("mftbu %0" : "=r" (rval)); \
rval; \
})
#define mftb() \
({ \
unsigned long long rval; \
- asm volatile("mftb %0" : "=r" (rval)); \
+ __asm__ __volatile__ ("mftb %0" : "=r" (rval)); \
rval; \
})
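/*
 * Illustrative sketch, not part of this patch: on 32-bit PowerPC the 64-bit
 * timebase has to be assembled from mftbu()/mftbl(), re-reading the upper
 * half to guard against a carry between the two reads. The helper name is
 * made up.
 *
 *	static inline unsigned long long example_read_timebase(void)
 *	{
 *		unsigned long h, l;
 *
 *		do {
 *			h = mftbu();
 *			cmm_barrier();
 *			l = mftbl();
 *			cmm_barrier();
 *		} while (h != mftbu());
 *		return (((unsigned long long) h) << 32) | l;
 *	}
 */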
#define CAA_CACHE_LINE_SIZE 128
#ifdef CONFIG_RCU_HAVE_FENCE
-#define cmm_mb() asm volatile("mfence":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("mfence":::"memory")
/*
* Define cmm_rmb/cmm_wmb to "strict" barriers that may be needed when
* using SSE or working with I/O areas. cmm_smp_rmb/cmm_smp_wmb are
* only compiler barriers, which is enough for general use.
*/
-#define cmm_rmb() asm volatile("lfence":::"memory")
-#define cmm_wmb() asm volatile("sfence"::: "memory")
+#define cmm_rmb() __asm__ __volatile__ ("lfence":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("sfence"::: "memory")
#define cmm_smp_rmb() cmm_barrier()
#define cmm_smp_wmb() cmm_barrier()
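/*
 * Illustrative sketch, not part of this patch: x86 does not reorder ordinary
 * stores with other stores, so cmm_smp_wmb() can be a plain compiler barrier.
 * Weakly-ordered non-temporal stores are one case where the real sfence
 * behind cmm_wmb() matters ("buf" and "ready" are made-up names):
 *
 *	#include <emmintrin.h>
 *
 *	_mm_stream_si32(&buf[0], 42);	-- non-temporal (weakly ordered) store
 *	cmm_wmb();			-- sfence: drain before publishing
 *	ready = 1;
 *
 * With plain stores to buf, cmm_smp_wmb() (a compiler barrier here) would
 * have been sufficient.
 */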
#else
* IDT WinChip supports weak store ordering, and the kernel may enable it
* under our feet; cmm_smp_wmb() ceases to be a nop for these processors.
*/
-#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)"::: "memory")
#endif
-#define caa_cpu_relax() asm volatile("rep; nop" : : : "memory");
+#define caa_cpu_relax() __asm__ __volatile__ ("rep; nop" : : : "memory");
#define rdtscll(val) \
do { \
unsigned int __a, __d; \
- asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ __asm__ __volatile__ ("rdtsc" : "=a" (__a), "=d" (__d)); \
(val) = ((unsigned long long)__a) \
| (((unsigned long long)__d) << 32); \
} while(0)
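/*
 * Illustrative sketch, not part of this patch: typical use of rdtscll() to
 * take a cycle-count delta around a code section (work() is a made-up name).
 * Note that rdtsc is not a serializing instruction, so precise measurements
 * may want an additional fence or cpuid around it.
 *
 *	unsigned long long t0, t1;
 *
 *	rdtscll(t0);
 *	work();
 *	rdtscll(t1);
 *	printf("%llu cycles\n", t1 - t0);
 */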
#define caa_likely(x) __builtin_expect(!!(x), 1)
#define caa_unlikely(x) __builtin_expect(!!(x), 0)
-#define cmm_barrier() asm volatile("" : : : "memory")
+#define cmm_barrier() __asm__ __volatile__ ("" : : : "memory")
/*
* Instruct the compiler to perform only a single access to a variable
* (prohibits merging and refetching). Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
-#define CMM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+#define CMM_ACCESS_ONCE(x) (*(__volatile__ __typeof__(x) *)&(x))
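/*
 * Illustrative sketch, not part of this patch: polling a flag that a signal
 * handler or another thread may change, so each iteration must perform a
 * real load rather than letting the compiler hoist it out of the loop
 * ("stop_requested" is a made-up name):
 *
 *	while (!CMM_ACCESS_ONCE(stop_requested))
 *		caa_cpu_relax();
 *
 * The cast through a volatile-qualified lvalue forbids merging and
 * refetching, but does nothing about CPU-level reordering.
 */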
#ifndef caa_max
#define caa_max(a,b) ((a)>(b)?(a):(b))