From: Mathieu Desnoyers
Date: Tue, 12 Jun 2012 15:24:31 +0000 (-0400)
Subject: Fix c99 compatibility: use __asm__ and __volatile__ in public headers
X-Git-Tag: v0.7.4~12
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=e51500edbd9919cee53bc85cbb4b22cd4786fc42;p=userspace-rcu.git

Fix c99 compatibility: use __asm__ and __volatile__ in public headers

Signed-off-by: Mathieu Desnoyers
---

diff --git a/urcu/arch/alpha.h b/urcu/arch/alpha.h
index 300213e..9c81f0d 100644
--- a/urcu/arch/alpha.h
+++ b/urcu/arch/alpha.h
@@ -28,9 +28,9 @@ extern "C" {
 #endif
 
-#define cmm_mb() asm volatile("mb":::"memory")
-#define cmm_wmb() asm volatile("wmb":::"memory")
-#define cmm_read_barrier_depends() asm volatile("mb":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("mb":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("wmb":::"memory")
+#define cmm_read_barrier_depends() __asm__ __volatile__ ("mb":::"memory")
 
 typedef unsigned long long cycles_t;
 
diff --git a/urcu/arch/arm.h b/urcu/arch/arm.h
index b49f782..e20695e 100644
--- a/urcu/arch/arm.h
+++ b/urcu/arch/arm.h
@@ -30,9 +30,9 @@ extern "C" {
 #endif
 
 #ifdef CONFIG_RCU_ARM_HAVE_DMB
-#define cmm_mb() asm volatile("dmb":::"memory")
-#define cmm_rmb() asm volatile("dmb":::"memory")
-#define cmm_wmb() asm volatile("dmb":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("dmb":::"memory")
+#define cmm_rmb() __asm__ __volatile__ ("dmb":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("dmb":::"memory")
 #endif /* CONFIG_RCU_ARM_HAVE_DMB */
 
 #include
 
diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 2fcbf56..95393ea 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -46,7 +46,7 @@ extern "C" {
  * order cacheable and non-cacheable memory operations separately---i.e.
  * not the latter against the former.
  */
-#define cmm_mb() asm volatile("sync":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("sync":::"memory")
 
 /*
  * lwsync orders loads in cacheable memory with respect to other loads,
@@ -54,27 +54,27 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb() asm volatile(LWSYNC_OPCODE:::"memory")
-#define cmm_smp_wmb() asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_rmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
 
 #define mftbl()						\
 	({						\
 		unsigned long rval;			\
-		asm volatile("mftbl %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftbl %0" : "=r" (rval));	\
 		rval;					\
 	})
 
 #define mftbu()						\
 	({						\
 		unsigned long rval;			\
-		asm volatile("mftbu %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
 		rval;					\
 	})
 
 #define mftb()						\
 	({						\
 		unsigned long long rval;		\
-		asm volatile("mftb %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
 		rval;					\
 	})
 
diff --git a/urcu/arch/x86.h b/urcu/arch/x86.h
index c1e2e07..5853604 100644
--- a/urcu/arch/x86.h
+++ b/urcu/arch/x86.h
@@ -32,15 +32,15 @@ extern "C" {
 #define CAA_CACHE_LINE_SIZE 128
 
 #ifdef CONFIG_RCU_HAVE_FENCE
-#define cmm_mb() asm volatile("mfence":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("mfence":::"memory")
 
 /*
  * Define cmm_rmb/cmm_wmb to "strict" barriers that may be needed when
  * using SSE or working with I/O areas.  cmm_smp_rmb/cmm_smp_wmb are
  * only compiler barriers, which is enough for general use.
  */
-#define cmm_rmb() asm volatile("lfence":::"memory")
-#define cmm_wmb() asm volatile("sfence"::: "memory")
+#define cmm_rmb() __asm__ __volatile__ ("lfence":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("sfence"::: "memory")
 #define cmm_smp_rmb() cmm_barrier()
 #define cmm_smp_wmb() cmm_barrier()
 #else
@@ -55,17 +55,17 @@ extern "C" {
  * IDT WinChip supports weak store ordering, and the kernel may enable it
  * under our feet; cmm_smp_wmb() ceases to be a nop for these processors.
  */
-#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)"::: "memory")
 #endif
 
-#define caa_cpu_relax() asm volatile("rep; nop" : : : "memory");
+#define caa_cpu_relax() __asm__ __volatile__ ("rep; nop" : : : "memory");
 
 #define rdtscll(val)					\
 	do {						\
 	     unsigned int __a, __d;			\
-	     asm volatile("rdtsc" : "=a" (__a), "=d" (__d));	\
+	     __asm__ __volatile__ ("rdtsc" : "=a" (__a), "=d" (__d));	\
 	     (val) = ((unsigned long long)__a)		\
 		   | (((unsigned long long)__d) << 32);	\
 	} while(0)
 
diff --git a/urcu/compiler.h b/urcu/compiler.h
index 974885a..0c6ece2 100644
--- a/urcu/compiler.h
+++ b/urcu/compiler.h
@@ -24,7 +24,7 @@
 #define caa_likely(x) __builtin_expect(!!(x), 1)
 #define caa_unlikely(x) __builtin_expect(!!(x), 0)
 
-#define cmm_barrier() asm volatile("" : : : "memory")
+#define cmm_barrier() __asm__ __volatile__ ("" : : : "memory")
 
 /*
  * Instruct the compiler to perform only a single access to a variable
@@ -38,7 +38,7 @@
  * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
-#define CMM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+#define CMM_ACCESS_ONCE(x) (*(__volatile__ __typeof__(x) *)&(x))
 
 #ifndef caa_max
 #define caa_max(a,b) ((a)>(b)?(a):(b))
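
A note on why the rename matters, with a minimal standalone sketch (not part
of the commit; the file name barrier_demo.c and the my_barrier() macro are
hypothetical, invented here for illustration). GCC treats plain `asm' as a
GNU extension and stops recognizing it as a keyword in strict ISO modes such
as -std=c99, so public headers that use it break any consumer built with a
strict standard flag. The double-underscore spellings `__asm__' and
`__volatile__' are implementation-reserved identifiers that GCC-compatible
compilers accept in every mode, which is what this commit switches the public
headers to. The generated code is identical; only the spelling changes.

    /* barrier_demo.c -- hypothetical demo, not part of userspace-rcu.
     * Build in strict C99 mode to see the difference:
     *
     *     gcc -std=c99 -Wall -o barrier_demo barrier_demo.c
     */
    #include <stdio.h>

    /* Rejected under -std=c99: `asm' is not a keyword in strict ISO C. */
    /* #define my_barrier() asm volatile("" : : : "memory") */

    /* Accepted under both -std=c99 and -std=gnu99: the reserved
     * double-underscore spellings, as now used by the urcu headers. */
    #define my_barrier() __asm__ __volatile__ ("" : : : "memory")

    int main(void)
    {
            int x = 1;

            my_barrier();   /* compiler may not reorder accesses across this */
            printf("x = %d\n", x);
            return 0;
    }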