#ifndef _ARCH_X86_H
#define _ARCH_X86_H

/*
 * arch_x86.h: trivial definitions for the x86 architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <compiler.h>

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
#endif

#ifdef CONFIG_HAVE_FENCE
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif
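
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing that wmb()/rmb() exist for. The producer
 * orders its data store before the flag store; the consumer orders the
 * flag load before the data load. The example_data/example_data_ready
 * variables and both functions are hypothetical, added only to show
 * the pairing.
 */
static int example_data;
static volatile int example_data_ready;

static inline void example_produce(int v)
{
	example_data = v;
	wmb();			/* data store must reach memory before the flag */
	example_data_ready = 1;
}

static inline int example_consume(void)
{
	while (!example_data_ready)
		;		/* spin until the producer sets the flag */
	rmb();			/* flag load must complete before the data load */
	return example_data;
}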

/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mb()		mc()
 * #define rmb()	rmc()
 * #define wmb()	wmc()
 * #define mc()		arch_cache_flush()
 * #define rmc()	arch_cache_flush_read()
 * #define wmc()	arch_cache_flush_write()
 */

#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
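
/*
 * Illustrative sketch (not part of the original header): pointer
 * publication, the pattern smp_read_barrier_depends() exists for.
 * Everywhere except Alpha, the data dependency on the loaded pointer
 * already orders the subsequent dereference, so the barrier expands
 * to nothing on x86. The example_node type, the example_published
 * global, and both helpers are hypothetical.
 */
struct example_node {
	int value;
};

static struct example_node *example_published;

static inline void example_publish(struct example_node *n)
{
	smp_wmb();		/* order node initialization (by caller) before publication */
	example_published = n;
}

static inline int example_read(void)
{
	struct example_node *n = example_published;

	if (!n)
		return -1;	/* nothing published yet */
	smp_read_barrier_depends();	/* needed on Alpha only */
	return n->value;	/* dependent load: ordered by the address dependency */
}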

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
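
/*
 * Illustrative sketch (not part of the original header): the intended
 * use of cpu_relax() in a busy-wait loop, per the rep_nop() comment
 * above. example_wait_for() and its flag argument are hypothetical.
 */
static inline void example_wait_for(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE: cheaper spinning, yields to the sibling hyperthread */
	smp_rmb();		/* order the flag read before subsequent reads */
}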
96 | ||
af02d47e MD |
97 | #define xchg(ptr, v) \ |
98 | ((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(v), sizeof(*(ptr)))) | |
6d0ce021 | 99 | |
af02d47e | 100 | struct __xchg_ptr_as_array { |
6d0ce021 PM |
101 | unsigned long a[100]; |
102 | }; | |
af02d47e MD |
103 | |
104 | #define __xchg_ptr_as_array(x) ((struct __xchg_ptr_as_array *)(x)) | |
6d0ce021 PM |
105 | |
106 | /* | |
af02d47e MD |
107 | * xchg always implies a "lock" prefix, even on UP. See Intel documentation. |
108 | * volatile attribute is neccessary due to xchg side effect. | |
109 | * *ptr is an output argument. | |
6d0ce021 PM |
110 | * x is considered local, ptr is considered remote. |
111 | */ | |
af02d47e | 112 | static inline unsigned long __xchg(volatile void *ptr, unsigned long x, |
6d0ce021 PM |
113 | int size) |
114 | { | |
115 | switch (size) { | |
116 | case 1: | |
117 | asm volatile("xchgb %b0,%1" | |
118 | : "=q" (x) | |
af02d47e | 119 | : "m" (*__xchg_ptr_as_array(ptr)), "0" (x) |
6d0ce021 PM |
120 | : "memory"); |
121 | break; | |
122 | case 2: | |
123 | asm volatile("xchgw %w0,%1" | |
124 | : "=r" (x) | |
af02d47e | 125 | : "m" (*__xchg_ptr_as_array(ptr)), "0" (x) |
6d0ce021 PM |
126 | : "memory"); |
127 | break; | |
128 | case 4: | |
129 | asm volatile("xchgl %k0,%1" | |
130 | : "=r" (x) | |
af02d47e | 131 | : "m" (*__xchg_ptr_as_array(ptr)), "0" (x) |
6d0ce021 PM |
132 | : "memory"); |
133 | break; | |
af02d47e | 134 | #if (BITS_PER_LONG == 64) |
6d0ce021 PM |
135 | case 8: |
136 | asm volatile("xchgq %0,%1" | |
137 | : "=r" (x) | |
af02d47e | 138 | : "m" (*__xchg_ptr_as_array(ptr)), "0" (x) |
6d0ce021 PM |
139 | : "memory"); |
140 | break; | |
af02d47e | 141 | #endif |
6d0ce021 PM |
142 | } |
143 | smp_wmc(); | |
144 | return x; | |
145 | } | |
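
/*
 * Illustrative sketch (not part of the original header): a minimal
 * test-and-set spinlock built on xchg(). Because xchg always implies
 * the lock prefix (see the comment above), the acquire needs no extra
 * barrier. The example_spinlock_t type and both functions are
 * hypothetical.
 */
typedef volatile unsigned int example_spinlock_t;

static inline void example_spin_lock(example_spinlock_t *lock)
{
	while (xchg(lock, 1))	/* set atomically; loop while it was already held */
		cpu_relax();
}

static inline void example_spin_unlock(example_spinlock_t *lock)
{
	barrier();		/* x86 does not reorder stores, so a compiler barrier suffices */
	*lock = 0;
}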

#define rdtscll(val)						\
	do {							\
		unsigned int __a, __d;				\
		asm volatile("rdtsc" : "=a" (__a), "=d" (__d));	\
		(val) = ((unsigned long long)__a)		\
			| (((unsigned long long)__d) << 32);	\
	} while (0)

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
	cycles_t ret = 0;

	rdtscll(ret);
	return ret;
}
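
/*
 * Illustrative sketch (not part of the original header): timing a code
 * region with get_cycles(). example_time_region() is hypothetical, and
 * a raw TSC delta is only meaningful on a constant-rate TSC with both
 * reads executed on the same CPU.
 */
static inline cycles_t example_time_region(void (*fn)(void))
{
	cycles_t start, end;

	start = get_cycles();
	fn();			/* the region being measured */
	end = get_cycles();
	return end - start;
}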

#endif /* _ARCH_X86_H */