40d186b1c107bd4da45d89fa6977d31ba7eb1cbd
/*
 * arch_ppc.h: trivial definitions for the powerpc architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * Architecture capability flags: PowerPC provides explicit fence
 * instructions, and its caches are coherent across CPUs.
 */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

/* Width of an unsigned long in bits (32 or 64 depending on the ABI). */
#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
/*
 * Full memory barriers.  "sync" orders all prior loads/stores against
 * all subsequent ones; rmb()/wmb() use the same instruction here.
 * The "memory" clobber additionally acts as a compiler barrier.
 */
#define mb()	asm volatile("sync":::"memory")
#define rmb()	asm volatile("sync":::"memory")
#define wmb()	asm volatile("sync":::"memory")

/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mc()		arch_cache_flush()
 * #define rmc()	arch_cache_flush_read()
 * #define wmc()	arch_cache_flush_write()
 *
 * PowerPC caches are coherent (CONFIG_HAVE_MEM_COHERENCY above), so a
 * compiler barrier is sufficient.
 */
#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()
/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

/*
 * NOTE(review): the scraped copy of this file had lost the conditional
 * around the two sets of smp_* macros, leaving duplicate definitions.
 * Reconstructed: on SMP builds the smp_* barriers map to the real
 * hardware barriers above; on UP builds a compiler barrier suffices.
 */
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
75 static inline void cpu_relax(void)
/* Hook for the PPC405 erratum 77 workaround; empty on other CPUs. */
#define PPC405_ERR77(ra,rb)

/* Barrier instructions spliced into the asm sequences below. */
#define LWSYNC_ON_SMP "\n\tlwsync\n"
#define ISYNC_ON_SMP "\n\tisync\n"

/*
 * Dummy wide type used to make GCC treat the whole pointed-to object as
 * accessed by the asm, not just one word.
 * NOTE(review): the struct __xchg_dummy definition was missing from the
 * scraped copy; reconstructed as a large opaque array.
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
89 #ifndef _INCLUDE_API_H
92 * Exchange the 32-bits value pointed to by p, returns the old value.
93 * Might not work with PPC405 (see err 77).
95 static __always_inline
96 unsigned int __xchg_u32(volatile void *p
, unsigned int val
)
100 __asm__
__volatile__(LWSYNC_ON_SMP
101 "1:\t" "lwarx %0,0,%2\n"
105 : "=&r" (prev
), "+m" (*(volatile unsigned int *)p
)
#if (BITS_PER_LONG == 64)
/*
 * Exchange the 64-bits value pointed to by p, returns the old value.
 * Might not work with PPC405 (see err 77).
 *
 * Same reservation loop as __xchg_u32, using the doubleword forms
 * ldarx/stdcx. (only available on 64-bit PowerPC, hence the guard).
 * (asm body and closing #endif reconstructed from the scraped copy.)
 */
static __always_inline
unsigned long __xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(LWSYNC_ON_SMP
	"1:\t"	   "ldarx	%0,0,%2\n"
		   PPC405_ERR77(0,%2)
		   "stdcx.	%3,0,%2\n"
		   "bne-	1b"
		   ISYNC_ON_SMP
		: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
		: "r" (p), "r" (val)
		: "cc", "memory");

	return prev;
}
#endif
133 static __always_inline
134 unsigned long __xchg(volatile void *ptr
, unsigned long x
, int size
)
138 return __xchg_u32(ptr
, x
);
139 #if (BITS_PER_LONG == 64)
141 return __xchg_u64(ptr
, x
);
/*
 * note : xchg should only be used with pointers to 32 or 64-bits elements.
 * No build-time check is done on the element size because depending on
 * non-referenced unexisting symbol at link time to provide an error message
 * only work when compiling with optimizations.
 */
#define xchg(ptr, v)						\
	((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(v), sizeof(*(ptr))))
156 #endif /* #ifndef _INCLUDE_API_H */
/*
 * Read the lower (mftbl) and upper (mftbu) 32 bits of the PowerPC
 * timebase register, as GCC statement-expression macros yielding rval.
 * (Macro wrappers reconstructed: the scraped copy kept only the bodies.)
 */
#define mftbl()						\
	({						\
		unsigned long rval;			\
		asm volatile("mftbl %0" : "=r" (rval));	\
		rval;					\
	})

#define mftbu()						\
	({						\
		unsigned long rval;			\
		asm volatile("mftbu %0" : "=r" (rval));	\
		rval;					\
	})
/* 64-bit timebase/cycle counter value. */
typedef unsigned long long cycles_t;

/*
 * Read the 64-bit timebase on 32-bit PowerPC: sample upper half, then
 * lower half, then re-read the upper half; retry if it changed (i.e. the
 * lower word wrapped between the two reads), so the halves are coherent.
 * (Retry loop reconstructed: the scraped copy had lost it.)
 */
static inline cycles_t get_cycles(void)
{
	long h, l;

	for (;;) {
		h = mftbu();
		barrier();
		l = mftbl();
		barrier();
		if (mftbu() == h)
			return (((cycles_t) h) << 32) + l;
	}
}
188 #endif /* _ARCH_PPC_H */
This page took 0.032772 seconds and 3 git commands to generate.