Add basic sparc64 support
author Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Thu, 22 Oct 2009 19:11:08 +0000 (15:11 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Thu, 22 Oct 2009 19:11:08 +0000 (15:11 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
configure.ac
urcu/arch_sparc64.h [new file with mode: 0644]
urcu/uatomic_arch_sparc64.h [new file with mode: 0644]

diff --git a/configure.ac b/configure.ac
index d659d4f76f955702f8cec22cdeb184feffffd3ae..a8de71d3dcd9fc74ad5ca7edfad2e8ba97ad92fa 100644
--- a/configure.ac
+++ b/configure.ac
@@ -47,6 +47,7 @@ case $target_cpu in
        ppc) ARCHTYPE="ppc" ;;
        s390) ARCHTYPE="s390" ;;
        s390x) ARCHTYPE="s390" ;;
+       sparc64) ARCHTYPE="sparc64" ;;
        *) ARCHTYPE="unknown";;
 esac
 
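
For context: ARCHTYPE selects which architecture headers the build wires in; in this tree, configure is expected to link urcu/arch_$ARCHTYPE.h and urcu/uatomic_arch_$ARCHTYPE.h to the generic header names, so with ARCHTYPE="sparc64" consumers pick up the two new files below. A minimal consumer-side sketch, assuming that link step:

    /* Hypothetical consumer: the generic names resolve to the sparc64
     * headers added by this commit. */
    #include <urcu/arch.h>          /* -> urcu/arch_sparc64.h */
    #include <urcu/uatomic_arch.h>  /* -> urcu/uatomic_arch_sparc64.h */
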
diff --git a/urcu/arch_sparc64.h b/urcu/arch_sparc64.h
new file mode 100644
index 0000000..a260e3a
--- /dev/null
+++ b/urcu/arch_sparc64.h
@@ -0,0 +1,103 @@
+#ifndef _URCU_ARCH_SPARC64_H
+#define _URCU_ARCH_SPARC64_H
+
+/*
+ * arch_sparc64.h: trivial definitions for the Sparc64 architecture.
+ *
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#define CONFIG_HAVE_MEM_COHERENCY
+
+#define CACHE_LINE_SIZE        256
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
+#endif
+
+/*
+ * Inspired from the Linux kernel. Workaround Spitfire bug #51.
+ */
+#define membar_safe(type)                      \
+__asm__ __volatile__("ba,pt %%xcc, 1f\n\t"     \
+                    "membar " type "\n"        \
+                    "1:\n"                     \
+                    : : : "memory")
+
+#define mb()    membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+#define rmb()    membar_safe("#LoadLoad")
+#define wmb()    membar_safe("#StoreStore")
+
+/*
+ * Architectures without cache coherency need something like the following:
+ *
+ * #define mb()                mc()
+ * #define rmb()       rmc()
+ * #define wmb()       wmc()
+ * #define mc()                arch_cache_flush()
+ * #define rmc()       arch_cache_flush_read()
+ * #define wmc()       arch_cache_flush_write()
+ */
+
+#define mc()   barrier()
+#define rmc()  barrier()
+#define wmc()  barrier()
+
+#ifdef CONFIG_URCU_SMP
+#define smp_mb()       mb()
+#define smp_rmb()      rmb()
+#define smp_wmb()      wmb()
+#define smp_mc()       mc()
+#define smp_rmc()      rmc()
+#define smp_wmc()      wmc()
+#else
+#define smp_mb()       barrier()
+#define smp_rmb()      barrier()
+#define smp_wmb()      barrier()
+#define smp_mc()       barrier()
+#define smp_rmc()      barrier()
+#define smp_wmc()      barrier()
+#endif
+
+/* Nop everywhere except on alpha. */
+#define smp_read_barrier_depends()
+
+static inline void cpu_relax(void)
+{
+       barrier();
+}
+
+/*
+ * Serialize core instruction execution. Also acts as a compiler barrier.
+ */
+static inline void sync_core(void)
+{
+       mb();
+}
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+       return 0;       /* unimplemented */
+}
+
+#endif /* _URCU_ARCH_SPARC64_H */
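
To make the barrier macros concrete, here is a short publish/subscribe sketch (not part of the commit; the struct and globals are hypothetical):

    #include <urcu/arch.h>  /* smp_wmb()/smp_rmb() as defined above */

    struct thing {
            int a;
            int b;
    };

    static struct thing *global_thing;  /* hypothetical shared pointer */

    /* Writer: initialize the fields, then publish the pointer.
     * smp_wmb() (#StoreStore on SMP sparc64) keeps the field stores
     * ahead of the pointer store. */
    static void publish(struct thing *t)
    {
            t->a = 1;
            t->b = 2;
            smp_wmb();
            global_thing = t;
    }

    /* Reader: load the pointer, then the fields. smp_rmb() (#LoadLoad)
     * keeps the field loads after the pointer load. */
    static int consume(void)
    {
            struct thing *t = global_thing;

            if (!t)
                    return 0;
            smp_rmb();
            return t->a + t->b;
    }
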
diff --git a/urcu/uatomic_arch_sparc64.h b/urcu/uatomic_arch_sparc64.h
new file mode 100644
index 0000000..87deacd
--- /dev/null
+++ b/urcu/uatomic_arch_sparc64.h
@@ -0,0 +1,182 @@
+#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
+#define _URCU_ARCH_UATOMIC_SPARC64_H
+
+/* 
+ * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 2009      Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifndef __SIZEOF_LONG__
+/* __arch64__ (gcc) and __sparcv9 (Sun cc) indicate the 64-bit ABI;
+ * sparc v8 is a 32-bit ISA and must not select 8-byte longs. */
+#if (defined(__arch64__) || defined(__sparcv9))
+#define __SIZEOF_LONG__ 8
+#else
+#define __SIZEOF_LONG__ 4
+#endif
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
+#endif
+
+#define uatomic_set(addr, v)   STORE_SHARED(*(addr), (v))
+#define uatomic_read(addr)     LOAD_SHARED(*(addr))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+                             unsigned long _new, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               __asm__ __volatile__ (
+                       "membar #StoreLoad | #LoadLoad\n\t"
+                        "cas [%1],%2,%0\n\t"
+                        "membar #StoreLoad | #StoreStore\n\t"
+                        : "+&r" (_new)
+                        : "r" (addr), "r" (old)
+                        : "memory");
+
+               return _new;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__ (
+                       "membar #StoreLoad | #LoadLoad\n\t"
+                        "casx [%1],%2,%0\n\t"
+                        "membar #StoreLoad | #StoreStore\n\t"
+                        : "+&r" (_new)
+                        : "r" (addr), "r" (old)
+                        : "memory");
+
+               return _new;
+       }
+#endif
+       }
+       __builtin_trap();
+       return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, _new)                                   \
+       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),      \
+                                               sizeof(*(addr))))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read(addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, val, 4);
+               } while (oldt != old);
+
+               return old;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read(addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, val, 8);
+               } while (oldt != old);
+
+               return old;
+       }
+#endif
+       }
+       __builtin_trap();
+       return 0;
+}
+
+#define uatomic_xchg(addr, v)                                              \
+       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+                                               sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+                                int len)
+{
+       switch (len) {
+       case 4:
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read(addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
+               } while (oldt != old);
+
+               return old + val;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read(addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
+               } while (oldt != old);
+
+               return old + val;
+       }
+#endif
+       }
+       __builtin_trap();
+       return 0;
+}
+
+#define uatomic_add_return(addr, v)                                    \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#define uatomic_sub_return(addr, v)    uatomic_add_return((addr), -(v))
+
+#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
+#define uatomic_sub(addr, v)           (void)uatomic_sub_return((addr), (v))
+
+#define uatomic_inc(addr)              uatomic_add((addr), 1)
+#define uatomic_dec(addr)              uatomic_add((addr), -1)
+
+#define URCU_CAS_AVAIL()       1
+#define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
+
+#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
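
As a usage illustration (not part of the commit), the primitives compose in the usual way; this sketch assumes the headers are reachable under their generic names after the configure-time link:

    #include <urcu/arch.h>          /* smp_mb(), cpu_relax() */
    #include <urcu/uatomic_arch.h>

    static unsigned long refcount = 1;  /* hypothetical reference count */
    static int lock_word;               /* hypothetical 0/1 lock */

    static void get(void)
    {
            uatomic_inc(&refcount);  /* cmpxchg loop under the hood */
    }

    /* uatomic_sub_return() yields the new value, so the caller can
     * tell whether it dropped the last reference. */
    static int put(void)
    {
            return uatomic_sub_return(&refcount, 1) == 0;
    }

    /* Spin until the 0 -> 1 transition succeeds: uatomic_cmpxchg()
     * returns the value found in memory, so seeing 0 means we won. */
    static void lock(void)
    {
            while (uatomic_cmpxchg(&lock_word, 0, 1) != 0)
                    cpu_relax();
    }

    static void unlock(void)
    {
            smp_mb();  /* order critical-section stores before release */
            uatomic_set(&lock_word, 0);
    }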