/*
 * arch_x86.h: Definitions for the x86 architecture, derived from Linux.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but only version 2 of the License given
 * that this comes from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 */

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

#ifdef CONFIG_HAVE_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif
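
/*
 * Illustrative sketch, not part of the original header: the classic
 * publish/consume pattern that these barriers exist for.  The example_*
 * variables and functions are assumptions made up for this example.
 */
static int example_data;
static int example_ready;

static inline void example_publish(int v)
{
	example_data = v;
	wmb();		/* order the data store before the ready-flag store */
	example_ready = 1;
}

static inline int example_consume(void)
{
	while (!*(volatile int *)&example_ready)
		;	/* spin; cpu_relax() below is the polite way to wait */
	rmb();		/* order the ready-flag load before the data load */
	return example_data;
}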

/*
 * Architectures without cache coherency need something like the following:
 *
 *	#define mb()	mc()
 *	#define rmb()	rmc()
 *	#define wmb()	wmc()
 *	#define mc()	arch_cache_flush()
 *	#define rmc()	arch_cache_flush_read()
 *	#define wmc()	arch_cache_flush_write()
 */

#define mc() barrier()
#define rmc() barrier()
#define wmc() barrier()

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
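
/*
 * Illustrative sketch, not part of the original header: where cpu_relax()
 * belongs in a busy-wait loop.  The flag parameter and the function name
 * are assumptions made up for this example.
 */
static inline void example_spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE: cheaper spinning, kinder to the sibling hyperthread */
}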

#ifndef _INCLUDE_API_H

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#endif /* #ifndef _INCLUDE_API_H */
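
/*
 * Illustrative sketch, not part of the original header: atomic_inc() keeps
 * a shared counter exact when several threads bump it concurrently, because
 * the LOCK prefix makes the whole read-modify-write atomic.  The counter
 * and function names are assumptions made up for this example.
 */
static int example_event_count;

static inline void example_count_event(void)
{
	atomic_inc(&example_event_count);	/* no increment is ever lost */
}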

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 * x is considered local, ptr is considered remote.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	smp_wmc();
	return x;
}
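
/*
 * Illustrative sketch, not part of the original header: xchg() used as a
 * test-and-set primitive for a toy spinlock.  The lock convention (0 = free,
 * 1 = held) and the function names are assumptions made up for this example.
 */
static inline void example_spin_lock(int *lock)
{
	while (xchg(lock, 1))	/* atomically swap in 1; spin while the old value was 1 */
		cpu_relax();
}

static inline void example_spin_unlock(int *lock)
{
	mb();			/* stronger than x86 strictly needs, but keeps the sketch simple */
	*lock = 0;
}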

#define rdtscll(val) do { \
	unsigned int __a, __d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long long)__a) | (((unsigned long long)__d) << 32); \
} while (0)

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

	rdtscll(ret);
	return ret;
}
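
/*
 * Illustrative sketch, not part of the original header: timing a code region
 * with get_cycles().  RDTSC is not a serializing instruction, so very short
 * regions can appear reordered; serious measurements usually add CPUID or
 * RDTSCP, which this sketch deliberately omits.  The function-pointer
 * argument and the function name are assumptions made up for this example.
 */
static inline cycles_t example_time_region(void (*fn)(void))
{
	cycles_t start, end;

	start = get_cycles();
	fn();
	end = get_cycles();
	return end - start;
}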