fi
;;
x86_64) LIBFORMAT="elf64-x86-64" ;;
- ppc) LIBFORMAT="elf32-powerpc" ;;
+ powerpc) LIBFORMAT="elf32-powerpc" ;;
ppc64) LIBFORMAT="elf64-powerpc" ;;
s390) LIBFORMAT="elf32-s390" ;;
s390x) LIBFORMAT="elf64-s390" ;;
- *) AC_MSG_ERROR([unable to detect library format (unsupported architecture?)]) ;;
+ *) AC_MSG_ERROR([unable to detect library format (unsupported architecture ($host_cpu)?)]) ;;
esac
AC_SUBST(LIBFORMAT)
AC_MSG_RESULT($LIBFORMAT)
/* MATH */
+#include <ust/processor.h>
static inline unsigned int hweight32(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55555555);
return (res + (res >> 16)) & 0x000000FF;
}
-static inline int fls(int x)
-{
- int r;
-//ust// #ifdef CONFIG_X86_CMOV
- asm("bsrl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=&r" (r) : "rm" (x), "rm" (-1));
-//ust// #else
-//ust// asm("bsrl %1,%0\n\t"
-//ust// "jnz 1f\n\t"
-//ust// "movl $-1,%0\n"
-//ust// "1:" : "=r" (r) : "rm" (x));
-//ust// #endif
- return r + 1;
-}
-
static __inline__ int get_count_order(unsigned int count)
{
int order;
); \
asm volatile ( \
".section __markers,\"aw\",@progbits\n\t" \
- ".align 8\n\t" \
+ ".balign 8\n\t" \
"2:\n\t" \
_ASM_PTR "(__mstrtab_" XSTR(channel) "_" XSTR(name) "_channel_" XSTR(unique) ")\n\t" /* channel string */ \
_ASM_PTR "(__mstrtab_" XSTR(channel) "_" XSTR(name) "_name_" XSTR(unique) ")\n\t" /* name string */ \
".byte 0\n\t" /* ptype */ \
".word 0\n\t" /* channel_id */ \
".word 0\n\t" /* event_id */ \
- ".align " XSTR(__WORDSIZE) " / 8\n\t" /* alignment */ \
+ ".balign " XSTR(__WORDSIZE) " / 8\n\t" /* alignment */ \
_ASM_PTR "(marker_probe_cb)\n\t" /* call */ \
_ASM_PTR "(__mark_empty_function)\n\t" /* marker_probe_closure single.field1 */ \
_ASM_PTR "0\n\t" /* marker_probe_closure single.field2 */ \
_ASM_PTR "0\n\t" /* tp_cb */ \
_ASM_PTR "(1f)\n\t" /* location */ \
".previous\n\t" \
+ ARCH_COPY_ADDR("%[outptr]") \
"1:\n\t" \
- ARCH_COPY_ADDR("2b", "%[outptr]") "\n\t" \
: [outptr] "=r" (m) ); \
\
save_registers(&regs)
extern __thread long ust_reg_stack[500];
extern volatile __thread long *ust_reg_stack_ptr;
-#ifndef __x86_64
+#ifdef __i386
struct registers {
short ss;
long esp;
};
+static inline int fls(int x)
+{
+ int r;
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+ return r + 1;
+}
+
#ifdef CONFIG_UST_GDB_INTEGRATION
/* save_registers - saves most of the processor's registers so
#define RELATIVE_ADDRESS(__rel_label__) __rel_label__
-#define ARCH_COPY_ADDR(src, dst) "lea " src "," dst
+#define ARCH_COPY_ADDR(dst) "lea 2b," dst "\n\t"
#define _ASM_PTR ".long "
-#else /* below is code for x86-64 */
+#endif /* __i386 */
+
+#ifdef __x86_64
struct registers {
int padding; /* 4 bytes */
unsigned long rsp;
};
+static inline int fls(int x)
+{
+ int r;
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+ return r + 1;
+}
+
#ifdef CONFIG_UST_GDB_INTEGRATION
#define save_registers(regsptr) \
* in a relocatable way. On x86-64, this uses a special (%rip) notation. */
#define RELATIVE_ADDRESS(__rel_label__) __rel_label__(%%rip)
-#define ARCH_COPY_ADDR(src, dst) "lea " src "(%%rip)," dst
+#define ARCH_COPY_ADDR(dst) "lea 2b(%%rip)," dst "\n\t"
#define _ASM_PTR ".quad "
-#endif
+#endif /* x86_64 */
+
+#ifdef __PPC__
+
+struct registers {
+};
+
+static __inline__ int fls(unsigned int x)
+{
+ int lz;
+
+ asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
+}
+
+#define ARCH_COPY_ADDR(dst) \
+ "lis " dst ",2b@h\n\t" /* load high bytes */ \
+ "ori " dst "," dst ",2b@l\n\t" /* load low bytes */
+
+#define _ASM_PTR ".long "
+#define save_registers(a)
+
+#endif /* __PPC__ */
#endif /* UST_PROCESSOR_H */