From: compudj Date: Fri, 2 Jun 2006 11:47:26 +0000 (+0000) Subject: add ppc atomic X-Git-Tag: v0.12.20~1615 X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=8e66edd7fc35eb6b9ed6b493874c74e4363138ec;p=lttv.git add ppc atomic git-svn-id: http://ltt.polymtl.ca/svn@1874 04897980-b3bd-0310-b5e0-8ef037075253 --- diff --git a/ltt-usertrace/ltt/system-ppc.h b/ltt-usertrace/ltt/system-ppc.h new file mode 100644 index 00000000..82395f30 --- /dev/null +++ b/ltt-usertrace/ltt/system-ppc.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 1999 Cort Dougan + */ +#ifndef __PPC_SYSTEM_H +#define __PPC_SYSTEM_H + +#include +#include + +#include +#include + +/* + * Memory barrier. + * The sync instruction guarantees that all memory accesses initiated + * by this processor have been performed (with respect to all other + * mechanisms that access memory). The eieio instruction is a barrier + * providing an ordering (separately) for (a) cacheable stores and (b) + * loads and stores to non-cacheable memory (e.g. I/O devices). + * + * mb() prevents loads and stores being reordered across this point. + * rmb() prevents loads being reordered across this point. + * wmb() prevents stores being reordered across this point. + * read_barrier_depends() prevents data-dependent loads being reordered + * across this point (nop on PPC). + * + * We can use the eieio instruction for wmb, but since it doesn't + * give any ordering guarantees about loads, we have to use the + * stronger but slower sync instruction for mb and rmb. + */ +#define mb() __asm__ __volatile__ ("sync" : : : "memory") +#define rmb() __asm__ __volatile__ ("sync" : : : "memory") +#define wmb() __asm__ __volatile__ ("eieio" : : : "memory") +#define read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif /* CONFIG_SMP */ + +#ifdef __KERNEL__ +struct task_struct; +struct pt_regs; + +extern void print_backtrace(unsigned long *); +extern void show_regs(struct pt_regs * regs); +extern void flush_instruction_cache(void); +extern void hard_reset_now(void); +extern void poweroff_now(void); +#ifdef CONFIG_6xx +extern long _get_L2CR(void); +extern long _get_L3CR(void); +extern void _set_L2CR(unsigned long); +extern void _set_L3CR(unsigned long); +#else +#define _get_L2CR() 0L +#define _get_L3CR() 0L +#define _set_L2CR(val) do { } while(0) +#define _set_L3CR(val) do { } while(0) +#endif +extern void via_cuda_init(void); +extern void pmac_nvram_init(void); +extern void read_rtc_time(void); +extern void pmac_find_display(void); +extern void giveup_fpu(struct task_struct *); +extern void enable_kernel_fp(void); +extern void enable_kernel_altivec(void); +extern void giveup_altivec(struct task_struct *); +extern void load_up_altivec(struct task_struct *); +extern void giveup_spe(struct task_struct *); +extern void load_up_spe(struct task_struct *); +extern int fix_alignment(struct pt_regs *); +extern void cvt_fd(float *from, double *to, unsigned long *fpscr); +extern void cvt_df(double *from, float *to, unsigned long *fpscr); +extern int call_rtas(const char *, int, int, unsigned long *, ...); +extern void cacheable_memzero(void *p, unsigned int nb); +extern int 
do_page_fault(struct pt_regs *, unsigned long, unsigned long); +extern void bad_page_fault(struct pt_regs *, unsigned long, int); +extern void die(const char *, struct pt_regs *, long); + +struct device_node; +extern void note_scsi_host(struct device_node *, void *); + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); +#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) + +struct thread_struct; +extern struct task_struct *_switch(struct thread_struct *prev, + struct thread_struct *next); + +extern unsigned int rtas_data; + +static __inline__ unsigned long +xchg_u32(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__ ("\n\ +1: lwarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stwcx. %3,0,%2 \n\ + bne- 1b" + : "=&r" (prev), "=m" (*(volatile unsigned long *)p) + : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p) + : "cc", "memory"); + + return prev; +} + +/* + * This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define tas(ptr) (xchg((ptr),1)) + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + switch (size) { + case 4: + return (unsigned long) xchg_u32(ptr, x); +#if 0 /* xchg_u64 doesn't exist on 32-bit PPC */ + case 8: + return (unsigned long) xchg_u64(ptr, x); +#endif /* 0 */ + } + __xchg_called_with_bad_pointer(); + return x; + + +} + +extern inline void * xchg_ptr(void * m, void * val) +{ + return (void *) xchg_u32(m, (unsigned long) val); +} + + +#define __HAVE_ARCH_CMPXCHG 1 + +static __inline__ unsigned long +__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) +{ + unsigned int prev; + + __asm__ __volatile__ ("\n\ +1: lwarx %0,0,%2 \n\ + cmpw 0,%0,%3 \n\ + bne 2f \n" + PPC405_ERR77(0,%2) +" stwcx. %4,0,%2 \n\ + bne- 1b\n" +#ifdef CONFIG_SMP +" sync\n" +#endif /* CONFIG_SMP */ +"2:" + : "=&r" (prev), "=m" (*p) + : "r" (p), "r" (old), "r" (new), "m" (*p) + : "cc", "memory"); + + return prev; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __inline__ unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); +#if 0 /* we don't have __cmpxchg_u64 on 32-bit PPC */ + case 8: + return __cmpxchg_u64(ptr, old, new); +#endif /* 0 */ + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr,o,n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +#define arch_align_stack(x) (x) + +#endif /* __KERNEL__ */ +#endif /* __PPC_SYSTEM_H */ diff --git a/ltt-usertrace/ltt/system-ppc64.h b/ltt-usertrace/ltt/system-ppc64.h new file mode 100644 index 00000000..98d120ca --- /dev/null +++ b/ltt-usertrace/ltt/system-ppc64.h @@ -0,0 +1,306 @@ +#ifndef __PPC64_SYSTEM_H +#define __PPC64_SYSTEM_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include + +/* + * Memory barrier. + * The sync instruction guarantees that all memory accesses initiated + * by this processor have been performed (with respect to all other + * mechanisms that access memory). The eieio instruction is a barrier + * providing an ordering (separately) for (a) cacheable stores and (b) + * loads and stores to non-cacheable memory (e.g. I/O devices). + * + * mb() prevents loads and stores being reordered across this point. + * rmb() prevents loads being reordered across this point. + * wmb() prevents stores being reordered across this point. + * read_barrier_depends() prevents data-dependent loads being reordered + * across this point (nop on PPC). + * + * We have to use the sync instructions for mb(), since lwsync doesn't + * order loads with respect to previous stores. Lwsync is fine for + * rmb(), though. + * For wmb(), we use sync since wmb is used in drivers to order + * stores to system memory with respect to writes to the device. + * However, smp_wmb() can be a lighter-weight eieio barrier on + * SMP since it is only used to order updates to system memory. + */ +#define mb() __asm__ __volatile__ ("sync" : : : "memory") +#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory") +#define wmb() __asm__ __volatile__ ("sync" : : : "memory") +#define read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() __asm__ __volatile__("": : :"memory") +#define smp_rmb() __asm__ __volatile__("": : :"memory") +#define smp_wmb() __asm__ __volatile__("": : :"memory") +#define smp_read_barrier_depends() do { } while(0) +#endif /* CONFIG_SMP */ + +#ifdef __KERNEL__ +struct task_struct; +struct pt_regs; + +#ifdef CONFIG_DEBUGGER + +extern int (*__debugger)(struct pt_regs *regs); +extern int (*__debugger_ipi)(struct pt_regs *regs); +extern int (*__debugger_bpt)(struct pt_regs *regs); +extern int (*__debugger_sstep)(struct pt_regs *regs); +extern int (*__debugger_iabr_match)(struct pt_regs *regs); +extern int (*__debugger_dabr_match)(struct pt_regs *regs); +extern int (*__debugger_fault_handler)(struct pt_regs *regs); + +#define DEBUGGER_BOILERPLATE(__NAME) \ +static inline int __NAME(struct pt_regs *regs) \ +{ \ + if (unlikely(__ ## __NAME)) \ + return __ ## __NAME(regs); \ + return 0; \ +} + +DEBUGGER_BOILERPLATE(debugger) +DEBUGGER_BOILERPLATE(debugger_ipi) +DEBUGGER_BOILERPLATE(debugger_bpt) +DEBUGGER_BOILERPLATE(debugger_sstep) +DEBUGGER_BOILERPLATE(debugger_iabr_match) +DEBUGGER_BOILERPLATE(debugger_dabr_match) +DEBUGGER_BOILERPLATE(debugger_fault_handler) + +#ifdef CONFIG_XMON +extern void xmon_init(void); +#endif + +#else +static inline int debugger(struct pt_regs *regs) { return 0; } +static inline int debugger_ipi(struct pt_regs *regs) { return 0; } +static inline int debugger_bpt(struct pt_regs *regs) { return 0; } +static inline int debugger_sstep(struct pt_regs *regs) { return 0; } +static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } +#endif + +extern int fix_alignment(struct pt_regs *regs); 
+extern void bad_page_fault(struct pt_regs *regs, unsigned long address, + int sig); +extern void show_regs(struct pt_regs * regs); +extern void low_hash_fault(struct pt_regs *regs, unsigned long address); +extern int die(const char *str, struct pt_regs *regs, long err); + +extern int _get_PVR(void); +extern void giveup_fpu(struct task_struct *); +extern void disable_kernel_fp(void); +extern void flush_fp_to_thread(struct task_struct *); +extern void enable_kernel_fp(void); +extern void giveup_altivec(struct task_struct *); +extern void disable_kernel_altivec(void); +extern void enable_kernel_altivec(void); +extern int emulate_altivec(struct pt_regs *); +extern void cvt_fd(float *from, double *to, unsigned long *fpscr); +extern void cvt_df(double *from, float *to, unsigned long *fpscr); + +#ifdef CONFIG_ALTIVEC +extern void flush_altivec_to_thread(struct task_struct *); +#else +static inline void flush_altivec_to_thread(struct task_struct *t) +{ +} +#endif + +extern int mem_init_done; /* set on boot once kmalloc can be called */ + +/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ +extern unsigned char e2a(unsigned char); + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); +#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) + +struct thread_struct; +extern struct task_struct * _switch(struct thread_struct *prev, + struct thread_struct *next); + +static inline int __is_processor(unsigned long pv) +{ + unsigned long pvr; + asm("mfspr %0, 0x11F" : "=r" (pvr)); + return(PVR_VER(pvr) == pv); +} + +/* + * Atomic exchange + * + * Changes the memory location '*ptr' to be val and returns + * the previous value stored there. + * + * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64 + * is more like most of the other architectures. + */ +static __inline__ unsigned long +__xchg_u32(volatile int *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + EIEIO_ON_SMP +"1: lwarx %0,0,%3 # __xchg_u32\n\ + stwcx. %2,0,%3\n\ +2: bne- 1b" + ISYNC_ON_SMP + : "=&r" (dummy), "=m" (*m) + : "r" (val), "r" (m) + : "cc", "memory"); + + return (dummy); +} + +static __inline__ unsigned long +__xchg_u64(volatile long *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + EIEIO_ON_SMP +"1: ldarx %0,0,%3 # __xchg_u64\n\ + stdcx. %2,0,%3\n\ +2: bne- 1b" + ISYNC_ON_SMP + : "=&r" (dummy), "=m" (*m) + : "r" (val), "r" (m) + : "cc", "memory"); + + return (dummy); +} + +/* + * This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static __inline__ unsigned long +__xchg(volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 4: + return __xchg_u32(ptr, x); + case 8: + return __xchg_u64(ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ + }) + +#define tas(ptr) (xchg((ptr),1)) + +#define __HAVE_ARCH_CMPXCHG 1 + +static __inline__ unsigned long +__cmpxchg_u32(volatile int *p, int old, int new) +{ + unsigned int prev; + + __asm__ __volatile__ ( + EIEIO_ON_SMP +"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ + cmpw 0,%0,%3\n\ + bne- 2f\n\ + stwcx. 
%4,0,%2\n\ + bne- 1b" + ISYNC_ON_SMP + "\n\ +2:" + : "=&r" (prev), "=m" (*p) + : "r" (p), "r" (old), "r" (new), "m" (*p) + : "cc", "memory"); + + return prev; +} + +static __inline__ unsigned long +__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( + EIEIO_ON_SMP +"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ + cmpd 0,%0,%3\n\ + bne- 2f\n\ + stdcx. %4,0,%2\n\ + bne- 1b" + ISYNC_ON_SMP + "\n\ +2:" + : "=&r" (prev), "=m" (*p) + : "r" (p), "r" (old), "r" (new), "m" (*p) + : "cc", "memory"); + + return prev; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __inline__ unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); + case 8: + return __cmpxchg_u64(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr,o,n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +/* + * We handle most unaligned accesses in hardware. On the other hand + * unaligned DMA can be very expensive on some ppc64 IO chips (it does + * powers of 2 writes until it reaches sufficient alignment). + * + * Based on this we disable the IP header alignment in network drivers. + */ +#define NET_IP_ALIGN 0 + +#define arch_align_stack(x) (x) + +#endif /* __KERNEL__ */ +#endif
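
---

For context, the primitive this patch brings to userspace is the classic PowerPC lwarx/stwcx. reservation loop, exposed through cmpxchg()/xchg() plus the mb()/rmb()/wmb() barriers. The sketch below is not part of this commit; it is a minimal, hypothetical standalone example of the same compare-and-swap technique that __cmpxchg_u32() in system-ppc.h implements, used here to build a lock-free counter increment. The function names (cas_u32, counter_inc) are illustrative only, it builds with GCC on PowerPC only, and it omits the sync/isync barriers that the SMP kernel variants add around the loop.

    /*
     * Hypothetical sketch of the lwarx/stwcx. compare-and-swap loop used by
     * __cmpxchg_u32() in system-ppc.h.  PowerPC + GCC only; names invented
     * for illustration.
     */
    #include <stdio.h>

    /* Atomically: if (*p == old) store new; return the value observed at *p. */
    static inline unsigned int cas_u32(volatile unsigned int *p,
                                       unsigned int old, unsigned int new)
    {
            unsigned int prev;

            __asm__ __volatile__(
    "1:     lwarx   %0,0,%2\n"      /* load word and set reservation      */
    "       cmpw    0,%0,%3\n"      /* still holds the expected value?    */
    "       bne-    2f\n"           /* no: bail out, return what we saw   */
    "       stwcx.  %4,0,%2\n"      /* conditionally store the new value  */
    "       bne-    1b\n"           /* reservation lost: retry            */
    "2:"
            : "=&r" (prev), "=m" (*p)
            : "r" (p), "r" (old), "r" (new), "m" (*p)
            : "cc", "memory");

            return prev;
    }

    /* Lock-free increment built on the CAS loop above. */
    static void counter_inc(volatile unsigned int *counter)
    {
            unsigned int seen;

            do {
                    seen = *counter;
            } while (cas_u32(counter, seen, seen + 1) != seen);
    }

    int main(void)
    {
            volatile unsigned int counter = 0;

            counter_inc(&counter);
            counter_inc(&counter);
            printf("counter = %u\n", counter);  /* prints 2 */
            return 0;
    }

The header's kernel-derived versions additionally emit EIEIO_ON_SMP before and ISYNC_ON_SMP (or sync) after the loop so the exchange also acts as a memory barrier on SMP; a caller relying on ordering, as the usertrace buffer management does, would pair the plain loop above with the mb()/wmb() macros this patch defines.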