/*
 * arch_x86.h: Definitions for the x86 architecture, derived from Linux.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but only version 2 of the License given
 * that this comes from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 */

#ifndef _ARCH_X86_H
#define _ARCH_X86_H

#include <compiler.h>

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

#ifdef CONFIG_HAVE_FENCE
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif

/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mc()		arch_cache_flush()
 * #define rmc()	arch_cache_flush_read()
 * #define wmc()	arch_cache_flush_write()
 */

#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
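
/*
 * Illustrative pairing sketch (names such as struct foo, global_ptr and
 * use() are assumed for the example): a writer orders its payload stores
 * before publication, and a reader orders the pointer load before the
 * dereference.
 *
 *	struct foo *p = malloc(sizeof(*p));
 *	p->a = 1;
 *	smp_wmb();			// payload stores before publication
 *	global_ptr = p;			// publish
 *
 *	struct foo *q = global_ptr;	// reader side
 *	smp_read_barrier_depends();	// needed only on alpha, nop on x86
 *	use(q->a);
 */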

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
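
/*
 * Busy-wait sketch ("flag" is an assumed shared variable set by another
 * thread): spin with cpu_relax() so the CPU emits PAUSE, then order the
 * flag read before subsequent reads.
 *
 *	while (!*(volatile int *)&flag)
 *		cpu_relax();
 *	smp_rmb();
 */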

#ifndef _INCLUDE_API_H

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#endif /* #ifndef _INCLUDE_API_H */
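
/*
 * Usage sketch ("refcount" is an assumed shared counter): the lock prefix
 * makes the increment atomic with respect to other CPUs.
 *
 *	static int refcount;
 *	atomic_inc(&refcount);
 */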

#define xchg(ptr, v)							      \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 * x is considered local, ptr is considered remote.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
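
/*
 * Usage sketch ("flag" is an assumed shared variable): xchg() returns the
 * previous value and, because of the implied lock, acts as a full barrier.
 *
 *	static int flag;
 *	int old = xchg(&flag, 1);
 */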

#define rdtscll(val) do { \
	unsigned int __a, __d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
} while(0)

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles (void)
{
	unsigned long long ret = 0;

	rdtscll(ret);
	return ret;
}
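
/*
 * Timing sketch (do_work() is an assumed function under test): the counts
 * come from rdtsc, so they are per-CPU and not serializing.
 *
 *	cycles_t t0 = get_cycles();
 *	do_work();
 *	cycles_t delta = get_cycles() - t0;
 */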

#endif /* _ARCH_X86_H */