--- /dev/null
+/*
+ * arch_x86.h: Definitions for the x86 architecture, derived from Linux.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; but only version 2 of the License given
+ * that this comes from the Linux kernel.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ */
+
+/* Assume P4 or newer */
+#define CONFIG_HAVE_FENCE 1
+#define CONFIG_HAVE_MEM_COHERENCY
+
+#ifdef CONFIG_HAVE_FENCE
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence"::: "memory")
+#else
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#endif
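+
+/*
+ * Illustrative sketch, not part of the original header: wmb() pairs with
+ * rmb() when one thread publishes data through a flag.  The names payload
+ * and ready are hypothetical, and real code would read the flag through a
+ * volatile access.
+ *
+ *	// producer
+ *	payload = 42;
+ *	wmb();			// order the payload store before the flag store
+ *	ready = 1;
+ *
+ *	// consumer
+ *	while (!ready)
+ *		continue;
+ *	rmb();			// order the flag load before the payload load
+ *	assert(payload == 42);
+ */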
+
+/*
+ * Architectures without cache coherency need something like the following:
+ *
+ * #define mb() mc()
+ * #define rmb() rmc()
+ * #define wmb() wmc()
+ * #define mc() arch_cache_flush()
+ * #define rmc() arch_cache_flush_read()
+ * #define wmc() arch_cache_flush_write()
+ */
+
+#define mc() barrier()
+#define rmc() barrier()
+#define wmc() barrier()
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ asm volatile("rep; nop" ::: "memory");
+}
+
+static inline void cpu_relax(void)
+{
+ rep_nop();
+}
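+
+/*
+ * Illustrative sketch, not part of the original header: cpu_relax()
+ * belongs in the body of any busy-wait loop, for example when spinning
+ * until another thread sets a hypothetical stop flag:
+ *
+ *	extern volatile int stop;
+ *
+ *	while (!stop)
+ *		cpu_relax();	// emit PAUSE to ease the spin
+ */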
+
+#ifndef _INCLUDE_API_H
+
+static inline void atomic_inc(int *v)
+{
+ asm volatile("lock; incl %0"
+ : "+m" (*v));
+}
+
+#endif /* #ifndef _INCLUDE_API_H */
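+
+/*
+ * Illustrative sketch, not part of the original header: atomic_inc()
+ * lets concurrent threads bump a shared counter without a lock.  The
+ * counter and function names are hypothetical.
+ *
+ *	static int nevents;
+ *
+ *	void record_event(void)
+ *	{
+ *		atomic_inc(&nevents);	// locked incl, safe under concurrency
+ *	}
+ */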
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
+
+struct __xchg_dummy {
+ unsigned long a[100];
+};
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ * x is considered local, ptr is considered remote.
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ switch (size) {
+ case 1:
+ asm volatile("xchgb %b0,%1"
+ : "=q" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 2:
+ asm volatile("xchgw %w0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 4:
+ asm volatile("xchgl %k0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 8:
+ asm volatile("xchgq %0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ }
+ smp_wmc();
+ return x;
+}
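+
+/*
+ * Illustrative sketch, not part of the original header: because xchg()
+ * is atomic and implies a full barrier, it can build a simple
+ * test-and-set spinlock.  The lock variable and function names are
+ * hypothetical.
+ *
+ *	static int lockvar;	// 0 == free, 1 == held
+ *
+ *	void spin_acquire(void)
+ *	{
+ *		while (xchg(&lockvar, 1))	// old value 1: already held
+ *			cpu_relax();
+ *	}
+ *
+ *	void spin_release(void)
+ *	{
+ *		(void)xchg(&lockvar, 0);	// release with full barrier
+ *	}
+ */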
+
+
+#define rdtscll(val) do { \
+ unsigned int __a, __d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
+} while (0)
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned long long ret = 0;
+
+ rdtscll(ret);
+ return ret;
+}
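+
+/*
+ * Illustrative sketch, not part of the original header: get_cycles()
+ * brackets a code region to measure its cost in TSC ticks; do_work()
+ * is a hypothetical function.  Note that rdtsc is not a serializing
+ * instruction, so very short regions may be measured imprecisely.
+ *
+ *	cycles_t start, delta;
+ *
+ *	start = get_cycles();
+ *	do_work();
+ *	delta = get_cycles() - start;
+ */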