/*
 * arch_x86.h: Definitions for the x86 architecture, derived from Linux.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but only version 2 of the License given
 * that this comes from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 */

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

#ifdef CONFIG_HAVE_FENCE
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif

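/*
 * Usage sketch (not part of the original file): a minimal message-passing
 * hand-off showing how wmb()/rmb() pair up.  The names "payload", "ready",
 * "producer_publish" and "consumer_try_read" are hypothetical.
 */
static int payload;
static int ready;

static inline void producer_publish(int val)
{
	payload = val;
	wmb();		/* order the payload store before the flag store */
	ready = 1;
}

static inline int consumer_try_read(int *out)
{
	if (!ready)
		return 0;
	rmb();		/* order the flag load before the payload load */
	*out = payload;
	return 1;
}
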
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mc()		arch_cache_flush()
 * #define rmc()	arch_cache_flush_read()
 * #define wmc()	arch_cache_flush_write()
 */

#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

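/*
 * Usage sketch (not part of the original file): the kind of busy-wait loop
 * the comment above refers to.  The name "spin_until_set" is hypothetical.
 */
static inline void spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE eases pressure on the sibling hyperthread */
}
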
#ifndef _INCLUDE_API_H

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#endif /* #ifndef _INCLUDE_API_H */

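/*
 * Usage sketch (not part of the original file): atomic_inc() lets several
 * threads bump a shared counter without a lock.  "event_count" and
 * "record_event" are hypothetical names.
 */
static int event_count;

static inline void record_event(void)
{
	atomic_inc(&event_count);	/* single LOCK'd read-modify-write */
}
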
#define xchg(ptr, v)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 * x is considered local, ptr is considered remote.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

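/*
 * Usage sketch (not part of the original file): xchg() as a test-and-set
 * primitive for a trivial spinlock.  The names "sketch_spin_lock" and
 * "sketch_spin_unlock" are hypothetical; the implied LOCK prefix makes the
 * swap atomic and fully ordered.
 */
static inline void sketch_spin_lock(volatile int *lock)
{
	/* A returned 0 means the lock was free and is now ours. */
	while (xchg(lock, 1))
		cpu_relax();
}

static inline void sketch_spin_unlock(volatile int *lock)
{
	mb();		/* keep the critical section's accesses before the release */
	*lock = 0;
}
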
#define rdtscll(val) do { \
	unsigned int __a, __d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
} while(0)

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

	rdtscll(ret);
	return ret;
}

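/*
 * Usage sketch (not part of the original file): timing a code section with
 * get_cycles().  RDTSC is not a serializing instruction, so treat the
 * result as approximate.  "timed_section" is a hypothetical callback.
 */
static inline cycles_t cycles_elapsed(void (*timed_section)(void))
{
	cycles_t start, end;

	start = get_cycles();
	timed_section();
	end = get_cycles();
	return end - start;
}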