completely remove kernel headers
author: compudj <compudj@04897980-b3bd-0310-b5e0-8ef037075253>
Tue, 15 Aug 2006 16:34:08 +0000 (16:34 +0000)
committer: compudj <compudj@04897980-b3bd-0310-b5e0-8ef037075253>
Tue, 15 Aug 2006 16:34:08 +0000 (16:34 +0000)
git-svn-id: http://ltt.polymtl.ca/svn@2029 04897980-b3bd-0310-b5e0-8ef037075253

ltt-usertrace/Makefile
ltt-usertrace/ltt-usertrace-fast.c
ltt-usertrace/ltt/kernelutils-i386.h [new file with mode: 0644]
ltt-usertrace/ltt/kernelutils-x86_64.h [new file with mode: 0644]
ltt-usertrace/ltt/ltt-usertrace.h
ltt-usertrace/specs/ltt-usertrace.spec

index de07916e2a66e46df37ae5c2fd556b47870233be..93748564f4e099968ce8a1d29a3c5ab6a0e1e684 100644 (file)
@@ -22,7 +22,7 @@ sample: sample.c
        $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
 sample-block: sample-block.c
        $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
-       
+
 sample-thread-brand: sample-thread-brand.c
        $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
 
@@ -95,4 +95,4 @@ install_libs:
 install: install_headers libs install_libs
 
 clean:
-       rm -fr *.o *~ sample-thread sample sample-highspeed sample-printf sample-instrument-fct libltt-instrument-functions.so* libltt-instrument-functions.a libltt-usertrace-fast.a libltt-usertrace-fast.so* libltt-loader-user_generic.so* libltt-loader-user_generic.a sample-thread-slow sample-thread-fast sample-thread-brand sample-block java/*.class java/Sample.h java/TestBrand.h
+       find . -name \*~ | xargs rm -fr *.o sample-thread sample sample-highspeed sample-printf sample-instrument-fct libltt-instrument-functions.so* libltt-instrument-functions.a libltt-usertrace-fast.a libltt-usertrace-fast.so* libltt-loader-user_generic.so* libltt-loader-user_generic.a sample-thread-slow sample-thread-fast sample-thread-brand sample-block java/*.class java/Sample.h java/TestBrand.h
index 17da6cd8cf2a4d05b97bf6bf353e141643859dcf..032b22419af9d933a789357993c7a57f64f9bd53 100644 (file)
@@ -68,9 +68,6 @@
 #include <unistd.h>
 #include <sys/syscall.h>
 
-// included with hack for powerpc in ltt-usertrace.h #include <asm/atomic.h>
-#include <asm/timex.h> //for get_cycles()
-
 #include <ltt/ltt-usertrace.h>
 
 #define gettid() syscall(__NR_gettid)
diff --git a/ltt-usertrace/ltt/kernelutils-i386.h b/ltt-usertrace/ltt/kernelutils-i386.h
new file mode 100644 (file)
index 0000000..df7e695
--- /dev/null
@@ -0,0 +1,154 @@
+/*****************************************************************************
+ * kernelutils-i386.h
+ *
+ * This file holds the code needed by LTT usertrace that comes from the
+ * kernel headers.  Since including kernel headers is not recommended in
+ * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
+ * (i.e. copied/pasted) from the original kernel headers (2.6.17).
+ *
+ * Martin Bisson, July 2006
+ * Mathieu Desnoyers, August 2006
+ */
+
+#ifndef _KERNELUTILS_I386_H
+#define _KERNELUTILS_I386_H
+
+// We are careful, so we assume a possibly SMP machine
+#define LOCK "lock ; "
+#define LOCK_PREFIX "lock ; "
+
+
+
+
+// From atomic.h
+
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically reads the value of @v.
+ */ 
+#define atomic_read(v)         ((v)->counter)
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+       __asm__ __volatile__(
+               LOCK "addl %1,%0"
+               :"=m" (v->counter)
+               :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically increments @v by 1.
+ */ 
+static __inline__ void atomic_inc(atomic_t *v)
+{
+       __asm__ __volatile__(
+               LOCK "incl %0"
+               :"=m" (v->counter)
+               :"m" (v->counter));
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+       int __i = i;
+       __asm__ __volatile__(
+               LOCK "xaddl %0, %1;"
+               :"=r"(i)
+               :"m"(v->counter), "0"(i));
+       return i + __i;
+}
+
+
+
+
+// From system.h
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                       (unsigned long)(n),sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       unsigned long prev;
+       switch (size) {
+       case 1:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 2:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 4:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       }
+       return old;
+}
+
+
+// From msr.h
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+// From timex.h
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+       unsigned long long ret;
+
+       rdtscll(ret);
+       return ret;
+}
+
+
+#endif // _KERNELUTILS_I386_H
diff --git a/ltt-usertrace/ltt/kernelutils-x86_64.h b/ltt-usertrace/ltt/kernelutils-x86_64.h
new file mode 100644 (file)
index 0000000..4f2ec2f
--- /dev/null
@@ -0,0 +1,168 @@
+/*****************************************************************************
+ * kernelutils-x86_64.h
+ *
+ * This file holds the code needed by LTT usertrace that comes from the
+ * kernel headers.  Since including kernel headers is not recommended in
+ * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
+ * (i.e. copied/pasted) from the original kernel headers (2.6.17).
+ *
+ * Martin Bisson, July 2006
+ */
+
+#ifndef _KERNELUTILS_X86_64_H
+#define _KERNELUTILS_X86_64_H
+
+// We are careful, so we assume a possibly SMP machine
+#define LOCK "lock ; "
+#define LOCK_PREFIX "lock ; "
+
+
+
+
+// From atomic.h
+
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically reads the value of @v.
+ */ 
+#define atomic_read(v)         ((v)->counter)
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+       __asm__ __volatile__(
+               LOCK "addl %1,%0"
+               :"=m" (v->counter)
+               :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically increments @v by 1.
+ */ 
+static __inline__ void atomic_inc(atomic_t *v)
+{
+       __asm__ __volatile__(
+               LOCK "incl %0"
+               :"=m" (v->counter)
+               :"m" (v->counter));
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+       int __i = i;
+       __asm__ __volatile__(
+               LOCK "xaddl %0, %1;"
+               :"=r"(i)
+               :"m"(v->counter), "0"(i));
+       return i + __i;
+}
+
+
+
+
+// From system.h
+
+#define __xg(x) ((volatile long *)(x))
+
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       unsigned long prev;
+       switch (size) {
+       case 1:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 2:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 4:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 8:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+                                    : "=a"(prev)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       }
+       return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                       (unsigned long)(n),sizeof(*(ptr))))
+
+
+
+
+// From msr.h
+
+
+#define rdtscll(val) do { \
+     unsigned int __a,__d; \
+     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+
+
+
+// From timex.h
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+       unsigned long long ret;
+
+       rdtscll(ret);
+       return ret;
+}
+
+
+#endif // _KERNELUTILS_X86_64_H
index 4fed3b76b90c0c083f8510b331d1bb3919b105e9..d4d8861322dc3cc1d873e3e2ab420d0241d8ccde 100644 (file)
@@ -14,7 +14,6 @@
 #include <string.h>
 #include <stdint.h>
 #include <sys/types.h>
-#include <linux/unistd.h>
 
 #define inline inline __attribute__((always_inline))
 
 #include <ltt/timex-ppc.h>
 #endif
 #elif defined(__x86_64__)
-#include <asm/timex.h>
-#include <asm/bitops.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
+#include <ltt/kernelutils-x86_64.h>
+#elif defined(__i386__)
+#include <ltt/kernelutils-i386.h>
 #else
-#include <asm/timex.h>
-#include <asm/atomic.h>
+#error "Unsupported architecture"
 #endif
 
 #ifndef min
index 0480933acc3d3256d79eb18d096e45178960da6e..a7b4e3e6bd6b23c7a264bf709d842dbe7871deea 100644 (file)
@@ -47,6 +47,8 @@ ldconfig
 /usr/include/ltt
 /usr/include/ltt/atomic-ppc.h
 /usr/include/ltt/atomic-ppc64.h
+/usr/include/ltt/kernelutils-i386.h
+/usr/include/ltt/kernelutils-x86_64.h
 /usr/include/ltt/ltt-facility-custom-user_generic.h
 /usr/include/ltt/ltt-facility-id-user_generic.h
 /usr/include/ltt/ltt-facility-user_generic.h
This page took 0.029097 seconds and 4 git commands to generate.