powerpc: use __NO_LWSYNC__ check to select the appropriate lwsync/sync opcode
author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Thu, 22 Sep 2011 15:00:14 +0000 (11:00 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Thu, 22 Sep 2011 15:00:14 +0000 (11:00 -0400)
We already used it in the uatomic code; move it to the ppc arch header.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
urcu/arch/ppc.h
urcu/uatomic/ppc.h

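The __NO_LWSYNC__ preprocessor check lets a single macro pick the right
barrier instruction at compile time. A minimal sketch of the selection
logic this commit centralizes in urcu/arch/ppc.h, taken from the diff
below (the note about the compiler predefining __NO_LWSYNC__ for targets
without lwsync, e.g. under -mno-lwsync, is my assumption, not stated in
the commit):

#ifdef __NO_LWSYNC__                    /* assumed: set by the compiler when
                                           the target CPU lacks lwsync */
#define LWSYNC_OPCODE  "sync\n"         /* heavier full-barrier fallback */
#else
#define LWSYNC_OPCODE  "lwsync\n"       /* lightweight barrier, orders
                                           cacheable accesses only */
#endif

#define cmm_smp_rmb()  asm volatile(LWSYNC_OPCODE:::"memory")
#define cmm_smp_wmb()  asm volatile(LWSYNC_OPCODE:::"memory")
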
diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 048b217392cc7cd471647625ca1dd128e6627e00..2fcbf5660958a12d2ad6dd957838ed73da4f2bec 100644
@@ -32,6 +32,12 @@ extern "C" {
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE    256
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE  "sync\n"
+#else
+#define LWSYNC_OPCODE  "lwsync\n"
+#endif
+
 /*
  * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
  * preserve ordering of cacheable vs. non-cacheable accesses, so it
@@ -48,8 +54,8 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb()    asm volatile("lwsync":::"memory")
-#define cmm_smp_wmb()    asm volatile("lwsync":::"memory")
+#define cmm_smp_rmb()    asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()    asm volatile(LWSYNC_OPCODE:::"memory")
 
 #define mftbl()                                                \
        ({                                              \
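The comment above explains why lwsync is sufficient here: the smp barrier
pair only needs to order accesses to cacheable memory. A hedged usage
sketch of the resulting barriers in the classic message-passing pattern
(assumes liburcu is installed and that urcu/arch.h exposes the cmm_*
barriers; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <urcu/arch.h>          /* cmm_smp_wmb() / cmm_smp_rmb() */

static int data;
static volatile int ready;

static void *writer(void *unused)
{
        data = 42;
        cmm_smp_wmb();          /* store-store: publish data before ready */
        ready = 1;
        return NULL;
}

static void *reader(void *unused)
{
        while (!ready)
                ;               /* spin until the writer's flag is seen */
        cmm_smp_rmb();          /* load-load: read ready before data */
        printf("data = %d\n", data);    /* must print 42 */
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}
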
diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h
index 16dbd0cb7ad45d7b33501ddc7f53be69dbc8b673..31807505c271fc0a1e6eea5400a529585ea8935a 100644
 extern "C" {
 #endif 
 
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE  "sync\n"
-#else
-#define LWSYNC_OPCODE  "lwsync\n"
-#endif
-
 #define ILLEGAL_INSTR  ".long  0xd00d00"
 
 /*
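ILLEGAL_INSTR stays behind in urcu/uatomic/ppc.h. A hedged sketch of the
idiom it supports there (the helper name and the size switch are
illustrative assumptions, not code from this commit): executing the bogus
opcode raises SIGILL when an atomic is invoked on an operand size the
port does not implement, instead of misbehaving silently.

#define ILLEGAL_INSTR  ".long  0xd00d00"        /* not a valid PowerPC insn */

/* Hypothetical helper illustrating the trap-on-unsupported-size idiom. */
static inline unsigned long _uatomic_bad_size(int len)
{
        switch (len) {
        case 4:
        case 8:
                return 0;       /* sizes the real atomic code handles */
        }
        __asm__ __volatile__(ILLEGAL_INSTR);    /* raises SIGILL at run time */
        return 0;
}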