--- /dev/null
+
+CC=gcc
+INCLUDE_DIR?=/usr/include
+LIB_DIR?=/usr/lib
+RANLIB=ranlib
+
+LTT_CFLAGS=-I. -O2 -L. -fPIC
+# note : x86_64 needs -fPIC ? FIXME
+
+#For testing lib ltt-usertrace-fast
+#CFLAGS+=-DLTT_SUBBUF_SIZE_CPU=134217728
+#CFLAGS+=-DLTT_NULL_OUTPUT_TEST
+
+all: libs samples
+
+#SAMPLE PROGRAMS
+
+samples: sample sample-highspeed sample-printf \
+ sample-instrument-fct sample-thread-slow sample-thread-fast sample-thread-brand sample-block
+
+sample: sample.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
+sample-block: sample-block.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
+
+sample-thread-brand: sample-thread-brand.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
+
+sample-highspeed: sample-highspeed.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-usertrace-fast -lltt-loader-user_generic -o $@ $^
+
+sample-printf: sample-printf.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
+
+sample-instrument-fct: sample-instrument-fct.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -g -finstrument-functions -lltt-instrument-functions -o $@ $^
+
+sample-thread-slow: sample-thread-slow.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -lltt-loader-user_generic -o $@ $^
+
+sample-thread-fast: sample-thread-fast.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -lltt-usertrace-fast -lltt-loader-user_generic -o $@ $^
+
+
+#LIBRARIES
+
+libs: libltt-instrument-functions.a libltt-instrument-functions.so.0 \
+ libltt-usertrace-fast.a libltt-usertrace-fast.so.0 \
+ libltt-loader-user_generic.a libltt-loader-user_generic.so.0
+
+libltt-usertrace-fast.a: ltt-usertrace-fast.o
+ @rm -f libltt-usertrace-fast.a
+ $(AR) rc $@ $^
+ $(RANLIB) $@
+
+libltt-usertrace-fast.so.0: ltt-usertrace-fast.o
+ @rm -f libltt-usertrace-fast.so libltt-usertrace-fast.so.0
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -shared -Wl,-soname,libltt-usertrace-fast.so -o $@ $^
+ ln -s libltt-usertrace-fast.so.0 libltt-usertrace-fast.so
+
+libltt-instrument-functions.a: ltt-instrument-functions.o ltt-facility-loader-user_generic.o ltt-usertrace-fast.o
+ @rm -f libltt-instrument-functions.a
+ $(AR) rc $@ $^
+ $(RANLIB) $@
+
+libltt-instrument-functions.so.0: ltt-instrument-functions.o ltt-facility-loader-user_generic.o ltt-usertrace-fast.o
+ @rm -f libltt-instrument-functions.so libltt-instrument-functions.so.0
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -shared -Wl,-soname,libltt-instrument-functions.so -o $@ $^
+ ln -s libltt-instrument-functions.so.0 libltt-instrument-functions.so
+
+libltt-loader-user_generic.a: ltt-facility-loader-user_generic.o
+ @rm -f libltt-loader-user_generic.a
+ $(AR) rc $@ $^
+ $(RANLIB) $@
+
+libltt-loader-user_generic.so.0: ltt-facility-loader-user_generic.o
+ @rm -f libltt-loader-user_generic.so libltt-loader-user_generic.so.0
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -shared -Wl,-soname,libltt-loader-user_generic.so -o $@ $^
+ ln -s libltt-loader-user_generic.so.0 libltt-loader-user_generic.so
+
+%.o: %.c
+ $(CC) $(LTT_CFLAGS) $(CFLAGS) -c -o $@ $+
+
+.PHONY : clean install libs install_libs install_headers samples
+
+install_headers:
+ if [ ! -e "$(INCLUDE_DIR)/ltt" ] ; then mkdir $(INCLUDE_DIR)/ltt ; fi
+ cp -f ltt/*.h $(INCLUDE_DIR)/ltt
+
+install_libs:
+ cp -df libltt-instrument-functions.so* libltt-instrument-functions.a $(LIB_DIR)
+ cp -df libltt-usertrace-fast.so* libltt-usertrace-fast.a $(LIB_DIR)
+ cp -df libltt-loader-user_generic.so* libltt-loader-user_generic.a $(LIB_DIR)
+
+install: install_headers libs install_libs
+
+clean:
+ find . -name \*~ | xargs rm -fr *.o sample-thread sample sample-highspeed sample-printf sample-instrument-fct libltt-instrument-functions.so* libltt-instrument-functions.a libltt-usertrace-fast.a libltt-usertrace-fast.so* libltt-loader-user_generic.so* libltt-loader-user_generic.a sample-thread-slow sample-thread-fast sample-thread-brand sample-block java/*.class java/Sample.h java/TestBrand.h
--- /dev/null
+
+LTTng usertrace package
+
+Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+March 2006
+
+This package contains all the user space headers and C files necessary to make
+your application and library trace through an active LTTng tracer. Here is a
+short quickstart guide.
+
+Here are the currently supported architectures :
+x86
+(please add the ltt_trace_generic and ltt_register_generic system calls to
+other architectures as you need them : it will work magically)
+
+* Compile your kernel with the latest LTTng patch. Make sure the option
+ "Allow tracing from userspace" is _active_!
+ See the QUICKSTART guide at http://ltt.polymtl.ca/ for details about how to
+ setup a working tracer and viewer. See the genevent installation step : it is
+ required for method #2 below.
+
+* Extract the latest ltt-usertrace archive :
+su
+cd /usr/src
+wget http://ltt.polymtl.ca/packages/ltt-usertrace-x.x.tar.gz
+gzip -cd ltt-usertrace-x.x.tar.gz | tar xvof -
+
+* Build the sample programs and install the headers and libraries into your
+system :
+(32 bits)
+su
+cd /usr/src/ltt-usertrace
+make clean
+make install (will build and install headers and libraries)
+make
+(64 bits)
+su
+cd /usr/src/ltt-usertrace
+make clean
+LIB_DIR=/usr/lib64 make install CFLAGS=-m64
+make CFLAGS=-m64
+
+Feel free to look at the sample programs and the Makefile : they demonstrate
+very well the features of the usertrace package and how to use them.
+
+* There are three ways to trace information from your application. The choice
+ will principally depend on the trace data rate.
+
+1) Easy way, but slow (printf style)
+ See sample-printf.c for code example.
+
+- Add the following statements to your program source (the define must come
+ _before_ the includes!) :
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+#include <ltt/ltt-facility-custom-user_generic.h>
+
+Note the define of LTT_BLOCKING to 1 : if a trace buffer is full, your
+application will block. The default of this parameter is 0 (non-blocking) :
+events are lost when the trace buffer is full. The choice is up to you.
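+
+A minimal sketch of the non-blocking variant (only the define changes, and
+since 0 is the default the LTT_BLOCKING line may simply be omitted) :
+
+#define LTT_TRACE
+#define LTT_BLOCKING 0
+#include <ltt/ltt-facility-user_generic.h>
+#include <ltt/ltt-facility-custom-user_generic.h>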
+
+- Add something like the following sample line to your code. Note that this is
+  a very standard format string ; this is only a suggested presentation.
+
+trace_user_generic_slow_printf("in: %s at: %s:%d: Counter value is: %u.",
+ __FILE__, __func__, __LINE__, count);
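+
+Putting the pieces together, a complete minimal program could look like the
+following sketch (sample-printf.c stays the reference example) :
+
+/* myapp.c : minimal printf-style tracing (slow path) */
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+#include <ltt/ltt-facility-custom-user_generic.h>
+
+int main(void)
+{
+	unsigned int count;
+
+	for (count = 0; count < 10; count++)
+		trace_user_generic_slow_printf("in: %s at: %s:%d: Counter value is: %u.",
+				__FILE__, __func__, __LINE__, count);
+	return 0;
+}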
+
+- Compile your application with at least these parameters to gcc (the command
+  is split across two lines, joined by a "\") :
+gcc -D LTT_SHOW_DEBUG -I /usr/src/ltt-usertrace -o myapp myapp.c \
+	/usr/src/ltt-usertrace/ltt-facility-loader-user_generic.c
+
+To see what the final result looks like :
+- Start tracing
+- Start your application
+  ** You should see the following message when your program starts, if
+     LTT_SHOW_DEBUG is defined :
+     "LTT : ltt-facility-user_generic init in userspace"
+     If you don't, then you forgot to compile the facility loader into your
+     application. If you find this output annoying, you can remove the
+     "-D LTT_SHOW_DEBUG" gcc parameter, which will make the facility loader
+     silent.
+- Stop tracing
+Then, to see only the user_generic events :
+lttv -m textDump -t /tmp/trace1 -e "event.facility=user_generic"
+
+It will show :
+user_generic.slow_printf: 35885.922829472 (/cpu_0), 15521, 7453, SYSCALL { "in: sample-printf.c at: main:18: Counter value is: 0." }
+user_generic.slow_printf: 35886.925685289 (/cpu_0), 15521, 7453, SYSCALL { "in: sample-printf.c at: main:18: Counter value is: 1." }
+...
+
+
+
+2) The second way to log events is still easy. The advantage is that it
+ will make it easier to identify your data in the trace viewer afterward.
+ Please read the comments in method 1) explained previously, as they
+ are not repeated here.
+ See sample.c and sample-thread-slow.c for code example.
+
+- Go to the ltt-usertrace directory
+su
+cd /usr/src/ltt-usertrace
+
+- Create your own facility (i.e. user_myfacility.xml).
+ See the ones available in /usr/share/LinuxTraceToolkitViewer/facilities for
+ examples.
+  Your facility _must_ be named following this standard : "user_*", where * is
+  whatever you like. If it is not, it will be rejected by the kernel with an
+  "Operation not permitted" error (which can be seen with the -D LTT_SHOW_DEBUG
+  compilation parameter).
+
+user_myfacility.xml:
+
+<?xml version="1.0"?>
+<facility name="user_myfacility">
+ <description>Sample facility</description>
+ <event name="myevent">
+ <description>Sample event</description>
+ <field name="file"><string></field>
+ <field name="function"><string></field>
+ <field name="line"><int></field>
+ <field name="firstval"><long></field>
+ <field name="secondval"><pointer></field>
+ </event>
+</facility>
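+
+For this facility, genevent (next step) should generate a tracing function
+whose parameters mirror the XML fields. Expect a prototype along these lines
+(parameter names here are illustrative ; check the generated
+ltt-facility-user_myfacility.h for the exact declaration) :
+
+int trace_user_myfacility_myevent(const char *file, const char *function,
+		int line, long firstval, const void *secondval);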
+
+- AN IMPORTANT STEP FOLLOWS :
+  *copy* the user_myfacility.xml file into your system :
+su
+cp user_myfacility.xml /usr/share/LinuxTraceToolkitViewer/facilities
+
+- Use genevent to create the C code and headers :
+su
+cd /tmp
+mkdir genevent
+cd genevent
+for a in /usr/share/LinuxTraceToolkitViewer/facilities/user_*.xml;
+ do /usr/local/bin/genevent $a;
+done
+cd /usr/src/ltt-usertrace
+cp /tmp/genevent/*load* .
+cd ltt
+cp /tmp/genevent/ltt-facility-id-user_myfacility.h .
+cp /tmp/genevent/ltt-facility-user_myfacility.h .
+cd ..
+make install
+
+- Add the following statements to your program source (the define must come
+ _before_ the includes!) :
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_myfacility.h>
+
+- Add a call to the trace_user_myfacility_myevent function declared in
+  /usr/include/ltt/ltt-facility-user_myfacility.h in your program.
+For instance :
+trace_user_myfacility_myevent(__FILE__, __func__, __LINE__, 1234, (void*)0xF0F0F0F0);
+
+- Compile your application with at least these parameters to gcc (the command
+  is split across two lines, joined by a "\") :
+gcc -I /usr/src/ltt-usertrace -o myapp myapp.c \
+	/usr/src/ltt-usertrace/ltt-facility-loader-user_myfacility.c
+
+To see what the final result looks like :
+- Start tracing
+- Start your application
+- Stop tracing
+Then, to see only the user_myfacility events :
+lttv -m textDump -t /tmp/trace1 -e "event.facility=user_myfacility"
+
+It will show, for example :
+user_myfacility.myevent: 39507.805584526 (/cpu_1), 15829, 15736, SYSCALL { "myapp.c", "main", 8, 1234, 0xf0f0f0f0 }
+
+
+3) The third way to trace information from your application
+
+This method is clearly the _FASTEST_. It is principally I/O (disk and memory)
+bound. It creates a companion process for each of your program's threads, which
+dumps the tracing information into /tmp/ltt-usertrace.
+
+See sample-highspeed.c and sample-thread-fast.c for code example.
+
+- Add the following statements to your program source (the define must come
+ _before_ the includes!) :
+
+#define LTT_TRACE
+#define LTT_TRACE_FAST
+#include <ltt/ltt-facility-user_myfacility.h>
+
+- Add a call to the trace_user_myfacility_myevent function declared in
+  /usr/include/ltt/ltt-facility-user_myfacility.h in your program.
+For instance :
+trace_user_myfacility_myevent(__FILE__, __func__, __LINE__, 1234, (void*)0xF0F0F0F0);
+
+- Compile your application with at least these parameters to gcc (the command
+  is split across two lines, joined by a "\") :
+gcc -lltt-usertrace-fast -I /usr/src/ltt-usertrace -o myapp myapp.c \
+	/usr/src/ltt-usertrace/ltt-facility-loader-user_myfacility.c
+
+It requires a supplementary operation when you take the trace :
+- Start tracing (with lttctl)
+- Start your application
+- Let your application run...
+- Stop tracing
+- Move or copy /tmp/ltt-usertrace into your trace.
+i.e., if your trace is in /tmp/trace1 :
+su
+mv /tmp/ltt-usertrace /tmp/trace1
+
+
+Then, to see only the user_myfacility events :
+lttv -m textDump -t /tmp/trace1 -e "event.facility=user_myfacility"
+
+It will show, for example :
+user_myfacility.myevent: 39507.805584526 (/ltt-usertrace/process-26174.26174.39236180500380_0), 15829, 15736, USER_MODE { "myapp.c", "main", 8, 1234, 0xf0f0f0f0 }
+
+
+
+* Fun feature : function instrumentation
+
+Here is how to generate a full trace of your program's function calls.
+See the sample-instrument-fct.c example program.
+
+- Compile your application with at least these parameters to gcc (the command
+  is split across two lines, joined by a "\") :
+gcc -g -finstrument-functions \
+	-lltt-instrument-functions -o myapp myapp.c
+
+To see what the final result looks like :
+- Start tracing
+- Start your application
+- Stop tracing
+Then, to see only the function_entry and function_exit events :
+lttv -m textDump -t /tmp/trace1 -e "event.facility=user_generic & (event.name=function_entry | event.name=function_exit)"
+
+It will show, for example :
+user_generic.function_entry: 59329.709939111 (/ltt-usertrace/process-26202.0.39949996866578_0), 19250, 18581, USER_MODE { 0x8048454, 0x80484c2 }
+user_generic.function_exit: 59329.709944613 (/ltt-usertrace/process-26202.0.39949996866578_0), 19250, 18581, USER_MODE { 0x8048454, 0x80484c2 }
+
+You can then use addr2line (from the binutils package) :
+addr2line -e sample-instrument-fct -i -f 0x8048454
+which shows :
+test_function
+/usr/src/usertrace-generic/sample-instrument-fct.c:12
+
+The lookup in LTTV through libbfd has not been implemented yet.
+
+
+* Instrumentation of a java program
+
+See the java/ directory of this package. You will have to create a C library
+that holds the tracing functions, following ltt-java-string.c. It has to be
+called from the Java code, as shown in Sample.java.
+
+The generate.sh script compiles and executes the Java program with the JNI
+tracing library.
+
--- /dev/null
+// The Sample.java file
+public class Sample
+{
+ // Declaration of the Native (C) function
+ private static native void trace_java_generic_string(String arg);
+ static {
+ System.loadLibrary("ltt-java-string");
+ }
+
+ public static void main(String[] args)
+ {
+ Sample.trace_java_generic_string("Tracing from java");
+ }
+}
--- /dev/null
+
+import ltt.*;
+
+// The TestBrand.java file
+public class TestBrand
+{
+ public static void main(String[] args)
+ {
+ ltt.ThreadBrand.trace_java_generic_thread_brand("Brand_test");
+ }
+}
--- /dev/null
+
+package ltt;
+
+// The ThreadBrand.java file
+public class ThreadBrand
+{
+ // Declaration of the Native (C) function
+ public static native void trace_java_generic_thread_brand(String arg);
+ static {
+ System.loadLibrary("ltt-java-thread_brand");
+ }
+}
--- /dev/null
+#!/bin/sh
+
+export CLASSPATH=.:/usr/lib/jvm/java-1.5.0-sun-1.5.0.06/bin
+
+#Sample
+javac Sample.java
+javah -jni Sample
+gcc -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include \
+ -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include/linux \
+ -shared -Wl,-soname,libltt-java-string \
+ -o libltt-java-string.so ltt-java-string.c \
+ ../ltt-facility-loader-user_generic.c
+LD_LIBRARY_PATH=. java Sample
+
+#TestBrand
+echo javac ThreadBrand
+javac -d . ThreadBrand.java
+echo javah ThreadBrand
+javah -jni ltt.ThreadBrand
+echo gcc
+gcc -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include \
+ -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include/linux \
+ -shared -Wl,-soname,libltt-java-thread_brand \
+ -o libltt-java-thread_brand.so ltt-java-thread_brand.c \
+ ../ltt-facility-loader-user_generic.c
+echo javac test
+javac TestBrand.java
+echo run
+LD_LIBRARY_PATH=. java TestBrand
--- /dev/null
+
+#include <jni.h>
+#include "Sample.h"
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+
+JNIEXPORT void JNICALL Java_Sample_trace_1java_1generic_1string
+  (JNIEnv *env, jclass jc, jstring jstr)
+{
+ const char *str;
+ str = (*env)->GetStringUTFChars(env, jstr, NULL);
+ if (str == NULL) return; // out of memory error thrown
+ trace_user_generic_string(str);
+ (*env)->ReleaseStringUTFChars(env, jstr, str);
+}
+
--- /dev/null
+
+#include <jni.h>
+#include "Sample.h"
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+
+JNIEXPORT void JNICALL Java_ltt_ThreadBrand_trace_1java_1generic_1thread_1brand
+ (JNIEnv *env, jclass jc, jstring jstr)
+{
+ const char *str;
+ str = (*env)->GetStringUTFChars(env, jstr, NULL);
+ if (str == NULL) return; // out of memory error thrown
+ trace_user_generic_thread_brand(str);
+ (*env)->ReleaseStringUTFChars(env, jstr, str);
+}
+
--- /dev/null
+/*
+ * ltt-facility-loader-user_generic.c
+ *
+ * (C) Copyright 2005 -
+ * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
+ *
+ * Contains the LTT user space facility loader.
+ *
+ */
+
+
+#define LTT_TRACE
+#include <error.h>
+#include <stdio.h>
+#include <ltt/ltt-usertrace.h>
+#include "ltt-facility-loader-user_generic.h"
+
+static struct user_facility_info facility = {
+ .name = LTT_FACILITY_NAME,
+ .num_events = LTT_FACILITY_NUM_EVENTS,
+#ifndef LTT_PACK
+ .alignment = LTT_FACILITY_ALIGNMENT?sizeof(void*):0,
+#else
+ .alignment = 0,
+#endif //LTT_PACK
+ .checksum = LTT_FACILITY_CHECKSUM,
+ .int_size = sizeof(int),
+ .long_size = sizeof(long),
+ .pointer_size = sizeof(void*),
+ .size_t_size = sizeof(size_t)
+};
+
+static void __attribute__((constructor)) __ltt_user_init(void)
+{
+ int err;
+#ifdef LTT_SHOW_DEBUG
+ printf("LTT : ltt-facility-user_generic init in userspace\n");
+#endif //LTT_SHOW_DEBUG
+
+	err = ltt_register_generic(&LTT_FACILITY_SYMBOL, &facility);
+ LTT_FACILITY_CHECKSUM_SYMBOL = LTT_FACILITY_SYMBOL;
+
+ if (err) {
+#ifdef LTT_SHOW_DEBUG
+ perror("Error in ltt_register_generic");
+#endif //LTT_SHOW_DEBUG
+ }
+}
+
--- /dev/null
+#ifndef _LTT_FACILITY_LOADER_USER_GENERIC_H_
+#define _LTT_FACILITY_LOADER_USER_GENERIC_H_
+
+#include <ltt/ltt-usertrace.h>
+#include <ltt/ltt-facility-id-user_generic.h>
+
+ltt_facility_t ltt_facility_user_generic;
+ltt_facility_t ltt_facility_user_generic_B1865E44;
+
+#define LTT_FACILITY_SYMBOL ltt_facility_user_generic
+#define LTT_FACILITY_CHECKSUM_SYMBOL ltt_facility_user_generic_B1865E44
+#define LTT_FACILITY_CHECKSUM 0xB1865E44
+#define LTT_FACILITY_NAME "user_generic"
+#define LTT_FACILITY_NUM_EVENTS facility_user_generic_num_events
+
+#define LTT_FACILITY_ALIGNMENT 1
+
+#endif //_LTT_FACILITY_LOADER_USER_GENERIC_H_
--- /dev/null
+/****************************************************************************
+ * ltt-instrument-functions.c
+ *
+ * Mathieu Desnoyers
+ * March 2006
+ */
+
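+/* Make "inline" really mean always_inline : the tracing helpers included
+ * below must be truly inlined into the (non-instrumented) callbacks here,
+ * rather than emitted as out-of-line copies. */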
+#define inline inline __attribute__((always_inline))
+
+#define LTT_TRACE
+#define LTT_TRACE_FAST
+#include <ltt/ltt-usertrace-fast.h>
+#include <ltt/ltt-facility-user_generic.h>
+
+void __attribute__((no_instrument_function)) __cyg_profile_func_enter (
+ void *this_fn,
+ void *call_site)
+{
+ /* don't care about the return value */
+ trace_user_generic_function_entry(this_fn, call_site);
+}
+
+void __attribute__((no_instrument_function)) __cyg_profile_func_exit (
+ void *this_fn,
+ void *call_site)
+{
+ /* don't care about the return value */
+ trace_user_generic_function_exit(this_fn, call_site);
+}
+
--- /dev/null
+/* LTTng user-space "fast" library
+ *
+ * This daemon is spawned by each traced thread (to share the mmap).
+ *
+ * Its job is to dump periodically this buffer to disk (when it receives a
+ * SIGUSR1 from its parent).
+ *
+ * It uses the control information in the shared memory area (producer/consumer
+ * count).
+ *
+ * When the parent thread dies (yes, those things may happen ;) ), this daemon
+ * will flush the last buffer and write it to disk.
+ *
+ * Supplementary note for streaming : the daemon is responsible for flushing
+ * the buffer periodically if it is streaming data.
+ *
+ *
+ * Notes :
+ * shm memory is typically limited to 4096 units (system wide limit SHMMNI in
+ * /proc/sys/kernel/shmmni). As it requires computation time upon creation, we
+ * do not use it : we will use a shared mmap() instead which is passed through
+ * the fork().
+ * MAP_SHARED mmap segment. Updated when msync or munmap are called.
+ * MAP_ANONYMOUS.
+ * Memory mapped by mmap() is preserved across fork(2), with the same
+ * attributes.
+ *
+ * Eventually, there will be two modes :
+ * * Slow thread spawn : a fork() is done for each new thread. If the process
+ * dies, the data is not lost.
+ * * Fast thread spawn : a pthread_create() is done by the application for each
+ * new thread.
+ *
+ * We use a timer to check periodically if the parent died. I think it is less
+ * intrusive than a ptrace() on the parent, which would get every signal. The
+ * side effect of this is that we won't be notified if the parent does an
+ * exec(). In this case, we will just sit there until the parent exits.
+ *
+ *
+ * Copyright 2006 Mathieu Desnoyers
+ *
+ */
+
+#define inline inline __attribute__((always_inline))
+
+#define _GNU_SOURCE
+#define LTT_TRACE
+#define LTT_TRACE_FAST
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <syscall.h>
+#include <features.h>
+#include <pthread.h>
+#include <malloc.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include <ltt/ltt-usertrace.h>
+
+#define gettid() syscall(__NR_gettid)
+
+#ifdef LTT_SHOW_DEBUG
+#define dbg_printf(...) printf(__VA_ARGS__)
+#else
+#define dbg_printf(...)
+#endif //LTT_SHOW_DEBUG
+
+
+enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
+
+/* Writer (the traced application) */
+
+__thread struct ltt_trace_info *thread_trace_info = NULL;
+
+void ltt_usertrace_fast_buffer_switch(void)
+{
+ struct ltt_trace_info *tmp = thread_trace_info;
+ if(tmp)
+ kill(tmp->daemon_id, SIGUSR1);
+}
+
+/* The cleanup should never be called from a signal handler */
+static void ltt_usertrace_fast_cleanup(void *arg)
+{
+ struct ltt_trace_info *tmp = thread_trace_info;
+ if(tmp) {
+ thread_trace_info = NULL;
+ kill(tmp->daemon_id, SIGUSR2);
+ munmap(tmp, sizeof(*tmp));
+ }
+}
+
+/* Reader (the disk dumper daemon) */
+
+static pid_t traced_pid = 0;
+static pid_t traced_tid = 0;
+static int parent_exited = 0;
+static int fd_process = -1;
+static char outfile_name[PATH_MAX];
+static char identifier_name[PATH_MAX];
+
+/* signal handling */
+static void handler_sigusr1(int signo)
+{
+ dbg_printf("LTT Signal %d received : parent buffer switch.\n", signo);
+}
+
+static void handler_sigusr2(int signo)
+{
+ dbg_printf("LTT Signal %d received : parent exited.\n", signo);
+ parent_exited = 1;
+}
+
+static void handler_sigalarm(int signo)
+{
+ dbg_printf("LTT Signal %d received\n", signo);
+
+ if(getppid() != traced_pid) {
+ /* Parent died */
+ dbg_printf("LTT Parent %lu died, cleaning up\n", traced_pid);
+ traced_pid = 0;
+ }
+ alarm(3);
+}
+
+/* Do a buffer switch. Don't switch if buffer is completely empty */
+static void flush_buffer(struct ltt_buf *ltt_buf, enum force_switch_mode mode)
+{
+ uint64_t tsc;
+ int offset_begin, offset_end, offset_old;
+ int reserve_commit_diff;
+ int consumed_old, consumed_new;
+ int commit_count, reserve_count;
+ int end_switch_old;
+
+ do {
+		offset_old = atomic_read(&ltt_buf->offset);
+ offset_begin = offset_old;
+ end_switch_old = 0;
+ tsc = ltt_get_timestamp();
+ if(tsc == 0) {
+ /* Error in getting the timestamp : should not happen : it would
+ * mean we are called from an NMI during a write seqlock on xtime. */
+ return;
+ }
+
+ if(SUBBUF_OFFSET(offset_begin, ltt_buf) != 0) {
+ offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
+ end_switch_old = 1;
+ } else {
+ /* we do not have to switch : buffer is empty */
+ return;
+ }
+ if(mode == FORCE_ACTIVE)
+ offset_begin += ltt_subbuf_header_len(ltt_buf);
+ /* Always begin_switch in FORCE_ACTIVE mode */
+
+ /* Test new buffer integrity */
+		reserve_commit_diff =
+			atomic_read(
+				&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])
+			- atomic_read(
+				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
+ if(reserve_commit_diff == 0) {
+ /* Next buffer not corrupted. */
+ if(mode == FORCE_ACTIVE
+				&& (offset_begin-atomic_read(&ltt_buf->consumed))
+ >= ltt_buf->alloc_size) {
+ /* We do not overwrite non consumed buffers and we are full : ignore
+ switch while tracing is active. */
+ return;
+ }
+ } else {
+ /* Next subbuffer corrupted. Force pushing reader even in normal mode */
+ }
+
+ offset_end = offset_begin;
+	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
+ != offset_old);
+
+
+ if(mode == FORCE_ACTIVE) {
+ /* Push the reader if necessary */
+ do {
+			consumed_old = atomic_read(&ltt_buf->consumed);
+			/* If buffer is in overwrite mode, push the reader consumed count if
+			   the write position has reached it and we are not at the first
+			   iteration (don't push the reader farther than the writer).
+			   This operation can be done concurrently by many writers in the
+			   same buffer ; the writer at the farthest write position sub-buffer
+			   index in the buffer is the one which will win this loop. */
+			/* If the buffer is not in overwrite mode, pushing the reader only
+			   happens if a sub-buffer is corrupted */
+ if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
+ - SUBBUF_TRUNC(consumed_old, ltt_buf))
+ >= ltt_buf->alloc_size)
+ consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
+ else {
+ consumed_new = consumed_old;
+ break;
+ }
+		} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
+ != consumed_old);
+
+ if(consumed_old != consumed_new) {
+ /* Reader pushed : we are the winner of the push, we can therefore
+ reequilibrate reserve and commit. Atomic increment of the commit
+ count permits other writers to play around with this variable
+ before us. We keep track of corrupted_subbuffers even in overwrite
+ mode :
+ we never want to write over a non completely committed sub-buffer :
+ possible causes : the buffer size is too low compared to the unordered
+ data input, or there is a writer who died between the reserve and the
+ commit. */
+ if(reserve_commit_diff) {
+ /* We have to alter the sub-buffer commit count : a sub-buffer is
+ corrupted */
+ atomic_add(reserve_commit_diff,
+					&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
+				atomic_inc(&ltt_buf->corrupted_subbuffers);
+ }
+ }
+ }
+
+ /* Always switch */
+
+ if(end_switch_old) {
+ /* old subbuffer */
+ /* Concurrency safe because we are the last and only thread to alter this
+ sub-buffer. As long as it is not delivered and read, no other thread can
+ alter the offset, alter the reserve_count or call the
+ client_buffer_end_callback on this sub-buffer.
+ The only remaining threads could be the ones with pending commits. They
+	   will have to do the delivery themselves.
+ Not concurrency safe in overwrite mode. We detect corrupted subbuffers with
+ commit and reserve counts. We keep a corrupted sub-buffers count and push
+ the readers across these sub-buffers.
+ Not concurrency safe if a writer is stalled in a subbuffer and
+ another writer switches in, finding out it's corrupted. The result will be
+	   that the old (uncommitted) subbuffer will be declared corrupted, and that
+ the new subbuffer will be declared corrupted too because of the commit
+ count adjustment.
+ Offset old should never be 0. */
+ ltt_buffer_end_callback(ltt_buf, tsc, offset_old,
+ SUBBUF_INDEX((offset_old), ltt_buf));
+ /* Setting this reserve_count will allow the sub-buffer to be delivered by
+ the last committer. */
+ reserve_count = atomic_add_return((SUBBUF_OFFSET((offset_old-1),
+ ltt_buf) + 1),
+			&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old), ltt_buf)]);
+ if(reserve_count == atomic_read(
+		&ltt_buf->commit_count[SUBBUF_INDEX((offset_old), ltt_buf)])) {
+ ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old), ltt_buf), NULL);
+ }
+ }
+
+ if(mode == FORCE_ACTIVE) {
+ /* New sub-buffer */
+ /* This code can be executed unordered : writers may already have written
+ to the sub-buffer before this code gets executed, caution. */
+ /* The commit makes sure that this code is executed before the deliver
+ of this sub-buffer */
+ ltt_buffer_begin_callback(ltt_buf, tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
+ commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
+			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
+ /* Check if the written buffer has to be delivered */
+ if(commit_count == atomic_read(
+		&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
+ ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
+ }
+ }
+
+}
+
+
+static int open_output_files(void)
+{
+ int ret;
+ int fd;
+ /* Open output files */
+ umask(00000);
+ ret = mkdir(LTT_USERTRACE_ROOT, 0777);
+ if(ret < 0 && errno != EEXIST) {
+ perror("LTT Error in creating output (mkdir)");
+ exit(-1);
+ }
+ ret = chdir(LTT_USERTRACE_ROOT);
+ if(ret < 0) {
+ perror("LTT Error in creating output (chdir)");
+ exit(-1);
+ }
+	snprintf(identifier_name, PATH_MAX-1, "%lu.%lu.%llu",
+			(unsigned long)traced_tid, (unsigned long)traced_pid, get_cycles());
+ snprintf(outfile_name, PATH_MAX-1, "process-%s", identifier_name);
+
+#ifndef LTT_NULL_OUTPUT_TEST
+ fd = creat(outfile_name, 0644);
+#else
+ /* NULL test */
+ ret = symlink("/dev/null", outfile_name);
+ if(ret < 0) {
+ perror("error in symlink");
+ exit(-1);
+ }
+ fd = open(outfile_name, O_WRONLY);
+	if(fd < 0) {
+ perror("Error in open");
+ exit(-1);
+ }
+#endif //LTT_NULL_OUTPUT_TEST
+ return fd;
+}
+
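+/* Check whether a full, committed subbuffer is available. On success, return
+ * 0 and store the current consumed count in *offset ; return -EAGAIN if the
+ * subbuffer is still being written to or if no data is pending. */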
+static inline int ltt_buffer_get(struct ltt_buf *ltt_buf,
+ unsigned int *offset)
+{
+ unsigned int consumed_old, consumed_idx;
+	consumed_old = atomic_read(&ltt_buf->consumed);
+ consumed_idx = SUBBUF_INDEX(consumed_old, ltt_buf);
+
+	if(atomic_read(&ltt_buf->commit_count[consumed_idx])
+		!= atomic_read(&ltt_buf->reserve_count[consumed_idx])) {
+ return -EAGAIN;
+ }
+	if((SUBBUF_TRUNC(atomic_read(&ltt_buf->offset), ltt_buf)
+ -SUBBUF_TRUNC(consumed_old, ltt_buf)) == 0) {
+ return -EAGAIN;
+ }
+
+ *offset = consumed_old;
+
+ return 0;
+}
+
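+/* Release the subbuffer just read : advance the consumed count by one
+ * subbuffer and post the writer semaphore. Return -EIO if the writer pushed
+ * us in the meantime (the data just read was corrupted). */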
+static inline int ltt_buffer_put(struct ltt_buf *ltt_buf,
+ unsigned int offset)
+{
+ unsigned int consumed_old, consumed_new;
+ int ret;
+
+ consumed_old = offset;
+ consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
+	if(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
+ != consumed_old) {
+ /* We have been pushed by the writer : the last buffer read _is_
+ * corrupted!
+ * It can also happen if this is a buffer we never got. */
+ return -EIO;
+ } else {
+ if(traced_pid == 0 || parent_exited) return 0;
+
+		ret = sem_post(&ltt_buf->writer_sem);
+ if(ret < 0) {
+			perror("error in sem_post");
+ }
+ }
+ return ret;
+}
+
+static int read_subbuffer(struct ltt_buf *ltt_buf, int fd)
+{
+ unsigned int consumed_old;
+ int err;
+ dbg_printf("LTT read buffer\n");
+
+
+ err = ltt_buffer_get(ltt_buf, &consumed_old);
+ if(err != 0) {
+ if(err != -EAGAIN) dbg_printf("LTT Reserving sub buffer failed\n");
+ goto get_error;
+ }
+ if(fd_process == -1) {
+ fd_process = fd = open_output_files();
+ }
+
+ err = TEMP_FAILURE_RETRY(write(fd,
+ ltt_buf->start
+ + (consumed_old & ((ltt_buf->alloc_size)-1)),
+ ltt_buf->subbuf_size));
+
+ if(err < 0) {
+ perror("Error in writing to file");
+ goto write_error;
+ }
+#if 0
+ err = fsync(pair->trace);
+ if(err < 0) {
+ ret = errno;
+ perror("Error in writing to file");
+ goto write_error;
+ }
+#endif //0
+write_error:
+ err = ltt_buffer_put(ltt_buf, consumed_old);
+
+ if(err != 0) {
+ if(err == -EIO) {
+ dbg_printf("Reader has been pushed by the writer, last subbuffer corrupted.\n");
+ /* FIXME : we may delete the last written buffer if we wish. */
+ }
+ goto get_error;
+ }
+
+get_error:
+ return err;
+}
+
+/* This function is called by ltt_rw_init which has signals blocked */
+static void ltt_usertrace_fast_daemon(struct ltt_trace_info *shared_trace_info,
+		sigset_t oldset, pid_t l_traced_pid, pid_t l_traced_tid)
+{
+ struct sigaction act;
+ int ret;
+
+ traced_pid = l_traced_pid;
+ traced_tid = l_traced_tid;
+
+ dbg_printf("LTT ltt_usertrace_fast_daemon : init is %d, pid is %lu, traced_pid is %lu, traced_tid is %lu\n",
+ shared_trace_info->init, getpid(), traced_pid, traced_tid);
+
+ act.sa_handler = handler_sigusr1;
+ act.sa_flags = 0;
+ sigemptyset(&(act.sa_mask));
+ sigaddset(&(act.sa_mask), SIGUSR1);
+ sigaction(SIGUSR1, &act, NULL);
+
+ act.sa_handler = handler_sigusr2;
+ act.sa_flags = 0;
+ sigemptyset(&(act.sa_mask));
+ sigaddset(&(act.sa_mask), SIGUSR2);
+ sigaction(SIGUSR2, &act, NULL);
+
+ act.sa_handler = handler_sigalarm;
+ act.sa_flags = 0;
+ sigemptyset(&(act.sa_mask));
+ sigaddset(&(act.sa_mask), SIGALRM);
+ sigaction(SIGALRM, &act, NULL);
+
+ alarm(3);
+
+ while(1) {
+ ret = sigsuspend(&oldset);
+ if(ret != -1) {
+ perror("LTT Error in sigsuspend\n");
+ }
+ if(traced_pid == 0) break; /* parent died */
+ if(parent_exited) break;
+ dbg_printf("LTT Doing a buffer switch read. pid is : %lu\n", getpid());
+
+ do {
+ ret = read_subbuffer(&shared_trace_info->channel.process, fd_process);
+ } while(ret == 0);
+ }
+ /* The parent thread is dead and we have finished with the buffer */
+
+ /* Buffer force switch (flush). Using FLUSH instead of ACTIVE because we know
+ * there is no writer. */
+ flush_buffer(&shared_trace_info->channel.process, FORCE_FLUSH);
+ do {
+ ret = read_subbuffer(&shared_trace_info->channel.process, fd_process);
+ } while(ret == 0);
+
+ if(fd_process != -1)
+ close(fd_process);
+
+ ret = sem_destroy(&shared_trace_info->channel.process.writer_sem);
+ if(ret < 0) {
+ perror("error in sem_destroy");
+ }
+ munmap(shared_trace_info, sizeof(*shared_trace_info));
+
+ exit(0);
+}
+
+
+/* Reader-writer initialization */
+
+static enum ltt_process_role { LTT_ROLE_WRITER, LTT_ROLE_READER }
+ role = LTT_ROLE_WRITER;
+
+
+void ltt_rw_init(void)
+{
+ pid_t pid;
+ struct ltt_trace_info *shared_trace_info;
+ int ret;
+ sigset_t set, oldset;
+ pid_t l_traced_pid = getpid();
+ pid_t l_traced_tid = gettid();
+
+ /* parent : create the shared memory map */
+ shared_trace_info = mmap(0, sizeof(*thread_trace_info),
+			PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
+ shared_trace_info->init=0;
+ shared_trace_info->filter=0;
+ shared_trace_info->daemon_id=0;
+ shared_trace_info->nesting=0;
+ memset(&shared_trace_info->channel.process, 0,
+ sizeof(shared_trace_info->channel.process));
+ //Need NPTL!
+ ret = sem_init(&shared_trace_info->channel.process.writer_sem, 1,
+ LTT_N_SUBBUFS);
+ if(ret < 0) {
+ perror("error in sem_init");
+ }
+ shared_trace_info->channel.process.alloc_size = LTT_BUF_SIZE_PROCESS;
+ shared_trace_info->channel.process.subbuf_size = LTT_SUBBUF_SIZE_PROCESS;
+ shared_trace_info->channel.process.start =
+ shared_trace_info->channel.process_buf;
+ ltt_buffer_begin_callback(&shared_trace_info->channel.process,
+ ltt_get_timestamp(), 0);
+
+ shared_trace_info->init = 1;
+
+ /* Disable signals */
+ ret = sigfillset(&set);
+ if(ret) {
+ dbg_printf("LTT Error in sigfillset\n");
+ }
+
+ ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
+ if(ret) {
+ dbg_printf("LTT Error in pthread_sigmask\n");
+ }
+
+ pid = fork();
+ if(pid > 0) {
+ /* Parent */
+ shared_trace_info->daemon_id = pid;
+ thread_trace_info = shared_trace_info;
+
+ /* Enable signals */
+ ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+ if(ret) {
+ dbg_printf("LTT Error in pthread_sigmask\n");
+ }
+ } else if(pid == 0) {
+ pid_t sid;
+ /* Child */
+ role = LTT_ROLE_READER;
+ sid = setsid();
+		//Not a good idea to renice, unless futex wait eventually implements
+		//priority inheritance.
+ //ret = nice(1);
+ //if(ret < 0) {
+ // perror("Error in nice");
+ //}
+ if(sid < 0) {
+ perror("Error setting sid");
+ }
+ ltt_usertrace_fast_daemon(shared_trace_info, oldset, l_traced_pid,
+ l_traced_tid);
+ /* Should never return */
+ exit(-1);
+ } else if(pid < 0) {
+ /* fork error */
+ perror("LTT Error in forking ltt-usertrace-fast");
+ }
+}
+
+static __thread struct _pthread_cleanup_buffer cleanup_buffer;
+
+void ltt_thread_init(void)
+{
+ _pthread_cleanup_push(&cleanup_buffer, ltt_usertrace_fast_cleanup, NULL);
+ ltt_rw_init();
+}
+
+void __attribute__((constructor)) __ltt_usertrace_fast_init(void)
+{
+ dbg_printf("LTT usertrace-fast init\n");
+
+ ltt_rw_init();
+}
+
+void __attribute__((destructor)) __ltt_usertrace_fast_fini(void)
+{
+ if(role == LTT_ROLE_WRITER) {
+ dbg_printf("LTT usertrace-fast fini\n");
+ ltt_usertrace_fast_cleanup(NULL);
+ }
+}
+
--- /dev/null
+/*
+ * PowerPC atomic operations
+ */
+
+#ifndef _ASM_PPC_ATOMIC_H_
+#define _ASM_PPC_ATOMIC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
+
+#if 0 // We only do operations on one CPU at a time (LTT)
+#define SMP_SYNC "sync"
+#define SMP_ISYNC "\n\tisync"
+#else
+#define SMP_SYNC ""
+#define SMP_ISYNC
+#endif
+
+/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
+ * The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";"
+#else
+#define PPC405_ERR77(ra,rb)
+#endif
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1 \n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%2)\
+" stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ SMP_ISYNC
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ SMP_ISYNC
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define __MB __asm__ __volatile__ (SMP_SYNC : : : "memory")
+#define smp_mb__before_atomic_dec() __MB
+#define smp_mb__after_atomic_dec() __MB
+#define smp_mb__before_atomic_inc() __MB
+#define smp_mb__after_atomic_inc() __MB
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* _ASM_PPC_ATOMIC_H_ */
--- /dev/null
+/*
+ * PowerPC64 atomic operations
+ *
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PPC64_ATOMIC_H_
+#define _ASM_PPC64_ATOMIC_H_
+
+#include <asm/memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* _ASM_PPC64_ATOMIC_H_ */
--- /dev/null
+/*****************************************************************************
+ * kernelutils-arm.h
+ *
+ * This file holds the code needed by LTT usertrace that comes from the
+ * kernel headers. Since including kernel headers is not recommended in
+ * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
+ * (i.e. copied/pasted) from the original kernel headers (2.6.18).
+ *
+ * Do not use these functions within signal handlers, as the architecture offers
+ * no atomic operations. (Mathieu Desnoyers) It is safe to do multithreaded
+ * tracing though, as the buffers are per thread.
+ *
+ * Deepak Saxena, October 2006
+ */
+
+#ifndef _KERNELUTILS_ARM_H
+#define _KERNELUTILS_ARM_H
+
+#include <time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define atomic_read(v) ((v)->counter)
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int val;
+
+ val = v->counter;
+ v->counter = val += i;
+
+ return val;
+}
+
+#define atomic_add(i, v) (void) atomic_add_return(i, v)
+#define atomic_inc(v) (void) atomic_add_return(1, v)
+
+static inline unsigned long cmpxchg(volatile void *ptr,
+				    unsigned long old,
+				    unsigned long newval)
+{
+	unsigned long prev;
+	volatile unsigned long *p = ptr;
+
+	if ((prev = *p) == old)
+		*p = newval;
+	return(prev);
+}
+
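+/* No cheap userspace cycle counter is available on this ARM target, so we
+ * approximate get_cycles() with CLOCK_MONOTONIC nanoseconds. */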
+static inline unsigned long long get_cycles(void)
+{
+ struct timespec tp;
+ clock_gettime(CLOCK_MONOTONIC, &tp);
+	return (unsigned long long)tp.tv_sec * 1000000000ULL + tp.tv_nsec;
+}
+
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif
--- /dev/null
+/*****************************************************************************
+ * kernelutils-i386.h
+ *
+ * This file holds the code needed by LTT usertrace that comes from the
+ * kernel headers. Since including kernel headers is not recommended in
+ * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
+ * (i.e. copied/pasted) from the original kernel headers (2.6.17).
+ *
+ * Martin Bisson, July 2006
+ * Mathieu Desnoyers, August 2006
+ */
+
+#ifndef _KERNELUTILS_I386_H
+#define _KERNELUTILS_I386_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// We are careful, so we assume a possibly SMP machine
+#define LOCK "lock ; "
+#define LOCK_PREFIX "lock ; "
+
+
+// From atomic.h
+
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "addl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "incl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+ int __i = i;
+ __asm__ __volatile__(
+ LOCK "xaddl %0, %1;"
+ :"=r"(i)
+ :"m"(v->counter), "0"(i));
+ return i + __i;
+}
+
+
+
+
+// From system.h
+
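+/* Cast through a dummy struct large enough to cover any object : this makes
+ * gcc treat the whole pointed-to memory as an operand of the asm below
+ * instead of assuming a fixed-size alias. */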
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long newval, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(newval), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(newval), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(newval), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+
+// From msr.h
+
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+
+// From timex.h
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+ unsigned long long ret;
+
+ rdtscll(ret);
+ return ret;
+}
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif // _KERNELUTILS_I386_H
--- /dev/null
+/*****************************************************************************
+ * kernelutils-x86_64.h
+ *
+ * This file holds the code needed by LTT usertrace that comes from the
+ * kernel headers. Since including kernel headers is not recommended in
+ * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
+ * (i.e. copied/pasted) from the original kernel headers (2.6.17).
+ *
+ * Martin Bisson, July 2006
+ */
+
+#ifndef _KERNELUTILS_X86_64_H
+#define _KERNELUTILS_X86_64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// We are careful, so we assume a possibly SMP machine
+#define LOCK "lock ; "
+#define LOCK_PREFIX "lock ; "
+
+
+
+
+// From atomic.h
+
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "addl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "incl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+ int __i = i;
+ __asm__ __volatile__(
+ LOCK "xaddl %0, %1;"
+ :"=r"(i)
+ :"m"(v->counter), "0"(i));
+ return i + __i;
+}
+
+
+
+
+// From system.h
+
+#define __xg(x) ((volatile long *)(x))
+
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+			unsigned long newval, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(newval), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(newval), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+				     : "=a"(prev)
+				     : "r"(newval), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 8:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+				     : "=a"(prev)
+				     : "r"(newval), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+
+
+
+// From msr.h
+
+
+#define rdtscll(val) do { \
+ unsigned int __a,__d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+
+
+
+// From timex.h
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+ unsigned long long ret;
+
+ rdtscll(ret);
+ return ret;
+}
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif // _KERNELUTILS_X86_64_H
--- /dev/null
+#ifndef _LTT_FACILITY_CUSTOM_USER_GENERIC_H_
+#define _LTT_FACILITY_CUSTOM_USER_GENERIC_H_
+
+#include <sys/types.h>
+#include <ltt/ltt-facility-id-user_generic.h>
+#include <ltt/ltt-usertrace.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static int trace_user_generic_slow_printf(
+ const char *fmt, ...)
+#ifndef LTT_TRACE
+{
+	return 0;
+}
+#else
+{
+ /* Guess we need no more than 100 bytes. */
+ int n, size = 104;
+ char *p, *np;
+ va_list ap;
+ int ret;
+
+ size += ltt_align(size, sizeof(void*));
+ if ((p = malloc (size)) == NULL)
+ return -1;
+
+ while (1) {
+ /* Try to print in the allocated space. */
+ va_start(ap, fmt);
+ n = vsnprintf (p, size, fmt, ap);
+ va_end(ap);
+ /* If that worked, trace the string. */
+ if (n > -1 && n < size) {
+ ret = trace_user_generic_slow_printf_param_buffer(p, n+1+ltt_align(n+1, sizeof(void*)));
+ free(p);
+ return ret;
+ }
+ /* Else try again with more space. */
+ if (n > -1) /* glibc 2.1 */
+ size = n+1; /* precisely what is needed */
+ else /* glibc 2.0 */
+ size *= 2; /* twice the old size */
+ size += ltt_align(size, sizeof(void*));
+ if ((np = realloc (p, size)) == NULL) {
+ free(p);
+ return -1;
+ } else {
+ p = np;
+ }
+ }
+}
+#endif //LTT_TRACE
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif //_LTT_FACILITY_CUSTOM_USER_GENERIC_H_
--- /dev/null
+#ifndef _LTT_FACILITY_ID_USER_GENERIC_H_
+#define _LTT_FACILITY_ID_USER_GENERIC_H_
+
+#ifdef LTT_TRACE
+#include <ltt/ltt-usertrace.h>
+
+/**** facility handle ****/
+
+extern ltt_facility_t ltt_facility_user_generic_B1865E44;
+extern ltt_facility_t ltt_facility_user_generic;
+
+
+/**** event index ****/
+
+enum user_generic_event {
+ event_user_generic_string,
+ event_user_generic_string_pointer,
+ event_user_generic_slow_printf,
+ event_user_generic_function_entry,
+ event_user_generic_function_exit,
+ event_user_generic_thread_brand,
+ facility_user_generic_num_events
+};
+
+#endif //LTT_TRACE
+#endif //_LTT_FACILITY_ID_USER_GENERIC_H_
--- /dev/null
+#ifndef _LTT_FACILITY_USER_GENERIC_H_
+#define _LTT_FACILITY_USER_GENERIC_H_
+
+#include <sys/types.h>
+#include <ltt/ltt-facility-id-user_generic.h>
+#include <ltt/ltt-usertrace.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Named types */
+
+/* Event string structures */
+static inline void lttng_write_string_user_generic_string_data(
+ char *buffer,
+ size_t *to_base,
+ size_t *to,
+ const char **from,
+ size_t *len,
+ const char * obj)
+{
+ size_t size;
+ size_t align;
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, *from, *len);
+ }
+ *to += *len;
+ *len = 0;
+
+ align = sizeof(char);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ /* Contains variable sized fields : must explode the structure */
+
+ size = strlen(obj) + 1; /* Include final NULL char. */
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, obj, size);
+ *to += size;
+
+ /* Realign the *to_base on arch size, set *to to 0 */
+ *to += ltt_align(*to, sizeof(void *));
+ *to_base = *to_base+*to;
+ *to = 0;
+
+ /* Put source *from just after the C string */
+ *from += size;
+}
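+
+/* Note : the logging functions below call this serializer twice : a first
+ * pass with buffer == NULL only advances the size counters to compute
+ * reserve_size, and a second pass with a real buffer performs the copies. */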
+
+
+/* Event string logging function */
+static inline int trace_user_generic_string(
+ const char * lttng_param_data);
+
+#ifndef LTT_TRACE_FAST
+static inline int trace_user_generic_string(
+ const char * lttng_param_data)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ int ret = 0;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const char *real_from;
+ const char **from = &real_from;
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_data;
+ lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
+
+ reserve_size = *to_base + *to + *len;
+ {
+ char stack_buffer[reserve_size];
+ buffer = stack_buffer;
+
+ *to_base = *to = *len = 0;
+
+ *from = (const char*)lttng_param_data;
+ lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_string, buffer, reserve_size, LTT_BLOCKING, 0);
+ }
+
+ return ret;
+
+}
+#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_string(
+ const char * lttng_param_data)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const char *real_from;
+ const char **from = &real_from;
+ uint64_t tsc;
+ if (!trace) {
+ ltt_thread_init();
+ trace = thread_trace_info;
+ }
+
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_data;
+ lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
+ event_user_generic_string);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc);
+ if (!buffer)
+ goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ buffer = ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_B1865E44, event_user_generic_string,
+ reserve_size, tsc);
+ *from = (const char*)lttng_param_data;
+ lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+ return 0;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
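+
+/* Usage sketch (illustrative) : with LTT_TRACE defined,
+ *
+ *   int ret = trace_user_generic_string("hello from userspace");
+ *
+ * serializes the NUL-terminated string padded to the arch word size, then
+ * logs it through the blocking syscall path above or, with LTT_TRACE_FAST,
+ * writes it directly into the per-thread mapped buffer. */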
+
+/* Event string_pointer structures */
+static inline void lttng_write_string_user_generic_string_pointer_string(
+ char *buffer,
+ size_t *to_base,
+ size_t *to,
+ const char **from,
+ size_t *len,
+ const char * obj)
+{
+ size_t size;
+ size_t align;
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, *from, *len);
+ }
+ *to += *len;
+ *len = 0;
+
+ align = sizeof(char);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ /* Contains variable sized fields : must explode the structure */
+
+ size = strlen(obj) + 1; /* Include final NULL char. */
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, obj, size);
+ *to += size;
+
+ /* Realign the *to_base on arch size, set *to to 0 */
+ *to += ltt_align(*to, sizeof(void *));
+ *to_base = *to_base+*to;
+ *to = 0;
+
+ /* Put source *from just after the C string */
+ *from += size;
+}
+
+
+/* Event string_pointer logging function */
+static inline int trace_user_generic_string_pointer(
+ const char * lttng_param_string,
+ const void * lttng_param_pointer);
+
+#ifndef LTT_TRACE_FAST
+static inline int trace_user_generic_string_pointer(
+ const char * lttng_param_string,
+ const void * lttng_param_pointer)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ int ret = 0;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const char *real_from;
+ const char **from = &real_from;
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_string;
+ lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ *from = (const char*)&lttng_param_pointer;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ {
+ char stack_buffer[reserve_size];
+ buffer = stack_buffer;
+
+ *to_base = *to = *len = 0;
+
+ *from = (const char*)lttng_param_string;
+ lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = (const char*)&lttng_param_pointer;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_string_pointer, buffer, reserve_size, LTT_BLOCKING, 0);
+ }
+
+ return ret;
+
+}
+#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_string_pointer(
+ const char * lttng_param_string,
+ const void * lttng_param_pointer)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const char *real_from;
+ const char **from = &real_from;
+ uint64_t tsc;
+ if (!trace) {
+ ltt_thread_init();
+ trace = thread_trace_info;
+ }
+
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_string;
+ lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ *from = (const char*)&lttng_param_pointer;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
+ event_user_generic_string_pointer);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc);
+ if (!buffer)
+ goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ buffer = ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_B1865E44, event_user_generic_string_pointer,
+ reserve_size, tsc);
+ *from = (const char*)lttng_param_string;
+ lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = (const char*)&lttng_param_pointer;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+ return 0;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
+
+/* Event slow_printf structures */
+static inline void lttng_write_string_user_generic_slow_printf_string(
+ char *buffer,
+ size_t *to_base,
+ size_t *to,
+ const char **from,
+ size_t *len,
+ const char * obj)
+{
+ size_t size;
+ size_t align;
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, *from, *len);
+ }
+ *to += *len;
+ *len = 0;
+
+ align = sizeof(char);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ /* Contains variable sized fields : must explode the structure */
+
+ size = strlen(obj) + 1; /* Include final NULL char. */
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, obj, size);
+ *to += size;
+
+ /* Realign the *to_base on arch size, set *to to 0 */
+ *to += ltt_align(*to, sizeof(void *));
+ *to_base = *to_base+*to;
+ *to = 0;
+
+ /* Put source *from just after the C string */
+ *from += size;
+}
+
+
+/* Event slow_printf logging function */
+static inline int trace_user_generic_slow_printf_param_buffer(
+ char *buffer,
+ size_t reserve_size);
+
+#ifndef LTT_TRACE_FAST
+static inline int trace_user_generic_slow_printf_param_buffer(
+ char *buffer,
+ size_t reserve_size)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ int ret = 0;
+ {
+ ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_slow_printf, buffer, reserve_size, LTT_BLOCKING, 0);
+ }
+
+ return ret;
+
+}
+#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_slow_printf(
+ const char * lttng_param_string)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const char *real_from;
+ const char **from = &real_from;
+ uint64_t tsc;
+ if (!trace) {
+ ltt_thread_init();
+ trace = thread_trace_info;
+ }
+
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_string;
+ lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
+ event_user_generic_slow_printf);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc);
+ if (!buffer)
+ goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ buffer = ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_B1865E44, event_user_generic_slow_printf,
+ reserve_size, tsc);
+ *from = (const char*)lttng_param_string;
+ lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+ return 0;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
+
+/* Event function_entry structures */
+
+/* Event function_entry logging function */
+static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site);
+
+#ifndef LTT_TRACE_FAST
+static inline int trace_user_generic_function_entry(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ int ret = 0;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const char *real_from;
+ const char **from = &real_from;
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ {
+ char stack_buffer[reserve_size];
+ buffer = stack_buffer;
+
+ *to_base = *to = *len = 0;
+
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_function_entry, buffer, reserve_size, LTT_BLOCKING, 0);
+ }
+
+ return ret;
+
+}
+#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const char *real_from;
+ const char **from = &real_from;
+ uint64_t tsc;
+ if (!trace) {
+ ltt_thread_init();
+ trace = thread_trace_info;
+ }
+
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
+ event_user_generic_function_entry);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc);
+ if (!buffer)
+ goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ buffer = ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_B1865E44, event_user_generic_function_entry,
+ reserve_size, tsc);
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+ return 0;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
+
+/* Event function_exit structures */
+
+/* Event function_exit logging function */
+static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site);
+
+#ifndef LTT_TRACE_FAST
+static inline int trace_user_generic_function_exit(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ int ret = 0;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const char *real_from;
+ const char **from = &real_from;
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ {
+ char stack_buffer[reserve_size];
+ buffer = stack_buffer;
+
+ *to_base = *to = *len = 0;
+
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_function_exit, buffer, reserve_size, LTT_BLOCKING, 0);
+ }
+
+ return ret;
+
+}
+#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const char *real_from;
+ const char **from = &real_from;
+ uint64_t tsc;
+ if (!trace) {
+ ltt_thread_init();
+ trace = thread_trace_info;
+ }
+
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
+ event_user_generic_function_exit);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc);
+ if (!buffer)
+ goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ buffer = ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_B1865E44, event_user_generic_function_exit,
+ reserve_size, tsc);
+ *from = (const char*)&lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = (const char*)&lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+ return 0;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
+
+/* Event thread_brand structures */
+static inline void lttng_write_string_user_generic_thread_brand_name(
+ char *buffer,
+ size_t *to_base,
+ size_t *to,
+ const char **from,
+ size_t *len,
+ const char * obj)
+{
+ size_t size;
+ size_t align;
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, *from, *len);
+ }
+ *to += *len;
+ *len = 0;
+
+ align = sizeof(char);
+
+ if (*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ /* Contains variable sized fields : must explode the structure */
+
+ size = strlen(obj) + 1; /* Include final NULL char. */
+ if (buffer != NULL)
+ memcpy(buffer+*to_base+*to, obj, size);
+ *to += size;
+
+ /* Realign the *to_base on arch size, set *to to 0 */
+ *to += ltt_align(*to, sizeof(void *));
+ *to_base = *to_base+*to;
+ *to = 0;
+
+ /* Put source *from just after the C string */
+ *from += size;
+}
+
+
+/* Event thread_brand logging function */
+static inline int trace_user_generic_thread_brand(
+ const char * lttng_param_name);
+
+#ifndef LTT_TRACE_FAST
+static inline int trace_user_generic_thread_brand(
+ const char * lttng_param_name)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ int ret = 0;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const char *real_from;
+ const char **from = &real_from;
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_name;
+ lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
+
+ reserve_size = *to_base + *to + *len;
+ {
+ char stack_buffer[reserve_size];
+ buffer = stack_buffer;
+
+ *to_base = *to = *len = 0;
+
+ *from = (const char*)lttng_param_name;
+ lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_thread_brand, buffer, reserve_size, LTT_BLOCKING, 1);
+ }
+
+ return ret;
+
+}
+#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_thread_brand(
+ const char * lttng_param_name)
+#ifndef LTT_TRACE
+{
+ return 0;
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ char *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const char *real_from;
+ const char **from = &real_from;
+ uint64_t tsc;
+ if (!trace) {
+ ltt_thread_init();
+ trace = thread_trace_info;
+ }
+
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = (const char*)lttng_param_name;
+ lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
+ event_user_generic_thread_brand);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc);
+ if (!buffer)
+ goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ buffer = ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_B1865E44, event_user_generic_thread_brand,
+ reserve_size, tsc);
+ *from = (const char*)lttng_param_name;
+ lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
+
+ /* Flush pending memcpy */
+ if (*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+ return 0;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif //_LTT_FACILITY_USER_GENERIC_H_
--- /dev/null
+
+/* LTTng user-space "fast" tracing header
+ *
+ * Copyright 2006 Mathieu Desnoyers
+ *
+ */
+
+#ifndef _LTT_USERTRACE_FAST_H
+#define _LTT_USERTRACE_FAST_H
+
+#ifdef LTT_TRACE
+#ifdef LTT_TRACE_FAST
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <syscall.h>
+#include <semaphore.h>
+#include <signal.h>
+
+#include <ltt/ltt-facility-id-user_generic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef LTT_N_SUBBUFS
+#define LTT_N_SUBBUFS 2
+#endif //LTT_N_SUBBUFS
+
+#ifndef LTT_SUBBUF_SIZE_PROCESS
+#define LTT_SUBBUF_SIZE_PROCESS 1048576
+#endif //LTT_SUBBUF_SIZE_PROCESS
+
+#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)
+
+#ifndef LTT_USERTRACE_ROOT
+#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
+#endif //LTT_USERTRACE_ROOT
+
+
+/* Buffer offset macros */
+
+#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
+#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
+#define SUBBUF_ALIGN(offset, buf) \
+ (((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
+#define SUBBUF_TRUNC(offset, buf) \
+ ((offset) & (~(buf->subbuf_size-1)))
+#define SUBBUF_INDEX(offset, buf) \
+ (BUFFER_OFFSET(offset,buf)/buf->subbuf_size)
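+
+/* Worked example (with the defaults above : 2 sub-buffers of 1 MiB, so
+ * alloc_size == 2097152 and subbuf_size == 1048576) : for offset 1048580,
+ * BUFFER_OFFSET == 1048580, SUBBUF_OFFSET == 4, SUBBUF_TRUNC == 1048576
+ * and SUBBUF_INDEX == 1. The masks only work because both sizes are
+ * powers of two. */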
+
+
+#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
+#define LTT_TRACER_VERSION_MAJOR 0
+#define LTT_TRACER_VERSION_MINOR 8
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#endif //atomic_cmpxchg
+
+struct ltt_trace_header {
+ uint32_t magic_number;
+ uint32_t arch_type;
+ uint32_t arch_variant;
+ uint32_t float_word_order; /* Only useful for user space traces */
+ uint8_t arch_size;
+ //uint32_t system_type;
+ uint8_t major_version;
+ uint8_t minor_version;
+ uint8_t flight_recorder;
+ uint8_t has_heartbeat;
+ uint8_t has_alignment; /* Event header alignment */
+ uint8_t tsc_lsb_truncate;
+ uint8_t tscbits;
+ uint32_t freq_scale;
+ uint64_t start_freq;
+ uint64_t start_tsc;
+ uint64_t start_monotonic;
+ uint64_t start_time_sec;
+ uint64_t start_time_usec;
+} __attribute__((packed));
+
+
+struct ltt_block_start_header {
+ struct {
+ uint64_t cycle_count;
+ uint64_t freq; /* khz */
+ } begin;
+ struct {
+ uint64_t cycle_count;
+ uint64_t freq; /* khz */
+ } end;
+ uint32_t lost_size; /* Size unused at the end of the buffer */
+ uint32_t buf_size; /* The size of this sub-buffer */
+ struct ltt_trace_header trace;
+} __attribute__((packed));
+
+
+
+struct ltt_buf {
+ void *start;
+ atomic_t offset;
+ atomic_t consumed;
+ atomic_t reserve_count[LTT_N_SUBBUFS];
+ atomic_t commit_count[LTT_N_SUBBUFS];
+
+ atomic_t events_lost;
+ atomic_t corrupted_subbuffers;
+ sem_t writer_sem; /* semaphore on which the writer waits */
+ unsigned int alloc_size;
+ unsigned int subbuf_size;
+};
+
+struct ltt_trace_info {
+ int init;
+ int filter;
+ pid_t daemon_id;
+ int nesting;
+ struct {
+ struct ltt_buf process;
+ char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
+ } channel;
+};
+
+
+struct ltt_event_header_nohb {
+ uint64_t timestamp;
+ unsigned char facility_id;
+ unsigned char event_id;
+ uint16_t event_size;
+} __attribute__((packed));
+
+extern __thread struct ltt_trace_info *thread_trace_info;
+
+void ltt_thread_init(void);
+
+void __attribute__((no_instrument_function))
+ ltt_usertrace_fast_buffer_switch(void);
+
+/* Get the byte offset of the channel within struct ltt_trace_info */
+#define GET_CHANNEL_INDEX(chan) \
+ (unsigned int)(unsigned long)&((struct ltt_trace_info*)NULL)->channel.chan
+
+/* ltt_get_index_from_facility
+ *
+ * Get channel index from facility and event id.
+ *
+ * @fID : facility ID
+ * @eID : event number
+ *
+ * Get the channel index into which events must be written for the given
+ * facility and event number. We get this structure offset as soon as possible
+ * and remember it so we pass through this logic only once per trace call (not
+ * for every trace).
+ */
+static inline unsigned int __attribute__((no_instrument_function))
+ ltt_get_index_from_facility(uint8_t fID,
+ uint8_t eID)
+{
+ return GET_CHANNEL_INDEX(process);
+}
+
+
+static inline struct ltt_buf * __attribute__((no_instrument_function))
+ ltt_get_channel_from_index(
+ struct ltt_trace_info *trace, unsigned int index)
+{
+ return (struct ltt_buf *)((void*)trace+index);
+}
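+
+/* The two helpers above form an offsetof()-style round trip (sketch) :
+ *
+ *   unsigned int idx = ltt_get_index_from_facility(fID, eID);
+ *   struct ltt_buf *buf = ltt_get_channel_from_index(trace, idx);
+ *
+ * idx is the byte offset of channel.process inside struct ltt_trace_info,
+ * so adding it back to the trace pointer yields the per-process buffer. */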
+
+
+/*
+ * ltt_get_header_size
+ *
+ * Calculate alignment offset for arch size void*. This is the
+ * alignment offset of the event header.
+ *
+ * Important note :
+ * The event header must be a size multiple of the void* size. This is necessary
+ * to be able to calculate statically the alignment offset of the variable
+ * length data fields that follow. The total offset calculated here :
+ *
+ * Alignment of header struct on arch size
+ * + sizeof(header struct)
+ * + padding added to end of struct to align on arch size.
+ * */
+static inline unsigned char __attribute__((no_instrument_function))
+ ltt_get_header_size(struct ltt_trace_info *trace,
+ void *address,
+ size_t data_size,
+ size_t *before_hdr_pad)
+{
+ unsigned int padding;
+ unsigned int header;
+ size_t after_hdr_pad;
+
+ header = sizeof(struct ltt_event_header_nohb);
+
+ /* Padding before the header. Calculated dynamically */
+ *before_hdr_pad = ltt_align((unsigned long)address, header);
+ padding = *before_hdr_pad;
+
+ /* Padding after header, considering header aligned on ltt_align.
+ * Calculated statically if header size is known. */
+ after_hdr_pad = ltt_align(header, sizeof(void*));
+ padding += after_hdr_pad;
+
+ return header+padding;
+}
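+
+/* Worked example (illustrative, 64-bit) : the packed ltt_event_header_nohb
+ * is 12 bytes (64-bit TSC, two 8-bit IDs, 16-bit size), so after_hdr_pad is
+ * ltt_align(12, 8) == 4 and the header slot occupies 16 bytes plus whatever
+ * dynamic before_hdr_pad is needed to align the write address. */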
+
+
+/* ltt_write_event_header
+ *
+ * Writes the event header to the pointer.
+ *
+ * @trace : trace info structure
+ * @buf : pointer to the channel buffer
+ * @ptr : buffer pointer
+ * @fID : facility ID
+ * @eID : event ID
+ * @event_size : size of the event, excluding the event header.
+ * @tsc : time stamp counter.
+ */
+static inline char *__attribute__((no_instrument_function))
+ ltt_write_event_header(
+ struct ltt_trace_info *trace, struct ltt_buf *buf,
+ void *ptr, uint8_t fID, uint32_t eID, size_t event_size,
+ uint64_t tsc)
+{
+ size_t after_hdr_pad;
+ struct ltt_event_header_nohb *nohb;
+
+ event_size = min(event_size, 0xFFFFU);
+ nohb = (struct ltt_event_header_nohb *)(ptr);
+ nohb->timestamp = (uint64_t)tsc;
+ nohb->facility_id = fID;
+ nohb->event_id = eID;
+ nohb->event_size = (uint16_t)event_size;
+ after_hdr_pad = ltt_align(sizeof(*nohb), sizeof(void*));
+ return ptr + sizeof(*nohb) + after_hdr_pad;
+}
+
+
+
+static inline uint64_t __attribute__((no_instrument_function))
+ltt_get_timestamp(void)
+{
+ return get_cycles();
+}
+
+static inline unsigned int __attribute__((no_instrument_function))
+ltt_subbuf_header_len(struct ltt_buf *buf)
+{
+ return sizeof(struct ltt_block_start_header);
+}
+
+
+
+static inline void __attribute__((no_instrument_function))
+ltt_write_trace_header(struct ltt_trace_header *header)
+{
+ header->magic_number = LTT_TRACER_MAGIC_NUMBER;
+ header->major_version = LTT_TRACER_VERSION_MAJOR;
+ header->minor_version = LTT_TRACER_VERSION_MINOR;
+ header->float_word_order = 0; //FIXME
+ header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
+ header->arch_size = sizeof(void*);
+ header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
+ header->flight_recorder = 0;
+ header->has_heartbeat = 0;
+ header->tsc_lsb_truncate = 0;
+ header->tscbits = 0;
+
+#ifndef LTT_PACK
+ header->has_alignment = sizeof(void*);
+#else
+ header->has_alignment = 0;
+#endif
+
+ //FIXME
+ header->freq_scale = 0;
+ header->start_freq = 0;
+ header->start_tsc = 0;
+ header->start_monotonic = 0;
+ header->start_time_sec = 0;
+ header->start_time_usec = 0;
+}
+
+
+static inline void __attribute__((no_instrument_function))
+ltt_buffer_begin_callback(struct ltt_buf *buf,
+ uint64_t tsc, unsigned int subbuf_idx)
+{
+ struct ltt_block_start_header *header =
+ (struct ltt_block_start_header*)
+ (buf->start + (subbuf_idx*buf->subbuf_size));
+
+ header->begin.cycle_count = tsc;
+ header->begin.freq = 0; //ltt_frequency();
+
+ header->lost_size = 0xFFFFFFFF; // for debugging...
+
+ header->buf_size = buf->subbuf_size;
+
+ ltt_write_trace_header(&header->trace);
+
+}
+
+
+
+static inline void __attribute__((no_instrument_function))
+ltt_buffer_end_callback(struct ltt_buf *buf,
+ uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
+{
+ struct ltt_block_start_header *header =
+ (struct ltt_block_start_header*)
+ (buf->start + (subbuf_idx*buf->subbuf_size));
+ /* offset is assumed to never be 0 here : never deliver a completely
+ * empty subbuffer. */
+ /* The lost size is between 0 and subbuf_size-1 */
+ header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
+ buf);
+ header->end.cycle_count = tsc;
+ header->end.freq = 0; //ltt_frequency();
+}
+
+
+static inline void __attribute__((no_instrument_function))
+ltt_deliver_callback(struct ltt_buf *buf,
+ unsigned subbuf_idx,
+ void *subbuf)
+{
+ ltt_usertrace_fast_buffer_switch();
+}
+
+
+/* ltt_reserve_slot
+ *
+ * Atomic slot reservation in a LTTng buffer. It will take care of
+ * sub-buffer switching.
+ *
+ * Parameters:
+ *
+ * @trace : the trace structure to log to.
+ * @buf : the buffer to reserve space into.
+ * @data_size : size of the variable length data to log.
+ * @slot_size : pointer to total size of the slot (out)
+ * @tsc : pointer to the tsc at the slot reservation (out)
+ *
+ * Return : NULL if not enough space, else returns the pointer
+ * to the beginning of the reserved slot. */
+static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
+ struct ltt_trace_info *trace,
+ struct ltt_buf *ltt_buf,
+ unsigned int data_size,
+ size_t *slot_size,
+ uint64_t *tsc)
+{
+ int offset_begin, offset_end, offset_old;
+ //int has_switch;
+ int begin_switch, end_switch_current, end_switch_old;
+ int reserve_commit_diff = 0;
+ unsigned int size;
+ size_t before_hdr_pad;
+ int consumed_old, consumed_new;
+ int commit_count, reserve_count;
+ int ret;
+ sigset_t oldset, set;
+
+ do {
+ offset_old = atomic_read(&ltt_buf->offset);
+ offset_begin = offset_old;
+ //has_switch = 0;
+ begin_switch = 0;
+ end_switch_current = 0;
+ end_switch_old = 0;
+ *tsc = ltt_get_timestamp();
+ if(*tsc == 0) {
+ /* Error in getting the timestamp, event lost */
+ atomic_inc(&ltt_buf->events_lost);
+ return NULL;
+ }
+
+ if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
+ begin_switch = 1; /* For offset_begin */
+ } else {
+ size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
+ data_size, &before_hdr_pad)
+ + data_size;
+
+ if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
+ //has_switch = 1;
+ end_switch_old = 1; /* For offset_old */
+ begin_switch = 1; /* For offset_begin */
+ }
+ }
+
+ if(begin_switch) {
+ if(end_switch_old) {
+ offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
+ }
+ offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
+ /* Test new buffer integrity */
+ reserve_commit_diff =
+ atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
+ ltt_buf)])
+ - atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
+ ltt_buf)]);
+
+ if(reserve_commit_diff == 0) {
+ /* Next buffer not corrupted. */
+ //if((SUBBUF_TRUNC(offset_begin, ltt_buf)
+ // - SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
+ // >= ltt_buf->alloc_size) {
+ {
+ /* sem_wait is not signal safe. Disable signals around it.
+ * Signals are kept disabled to make sure we win the cmpxchg. */
+ /* Disable signals */
+ ret = sigfillset(&set);
+ if(ret) perror("LTT Error in sigfillset\n");
+
+ ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
+ if(ret) perror("LTT Error in pthread_sigmask\n");
+
+ /* We detect if a signal came between
+ * the offset read and signal disabling:
+ * if it is the case, then we restart
+ * the loop after reenabling signals. It
+ * means that it's a signal that has
+ * won the buffer switch.*/
+ if(offset_old != atomic_read(&ltt_buf->offset)) {
+ ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+ if(ret) perror("LTT Error in pthread_sigmask\n");
+ continue;
+ }
+ /* If the offset is still the same, then
+ * we can safely proceed to do the
+ * buffer switch without being
+ * interrupted by a signal. */
+ sem_wait(&ltt_buf->writer_sem);
+
+ }
+ /* go on with the write */
+
+ //} else {
+ // /* next buffer not corrupted, we are either in overwrite mode or
+ // * the buffer is not full. It's safe to write in this new subbuffer.*/
+ //}
+ } else {
+ /* Next subbuffer corrupted. Force pushing reader even in normal
+ * mode. It's safe to write in this new subbuffer. */
+ /* No sem_post is required because we fall through without doing a
+ * sem_wait. */
+ }
+ size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
+ data_size, &before_hdr_pad) + data_size;
+ if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
+ /* Event too big for subbuffers, report error, don't complete
+ * the sub-buffer switch. */
+ atomic_inc(&ltt_buf->events_lost);
+ if(reserve_commit_diff == 0) {
+ ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+ if(ret) perror("LTT Error in pthread_sigmask\n");
+ }
+ return NULL;
+ } else {
+ /* We just made a successful buffer switch and the event fits in the
+ * new subbuffer. Let's write. */
+ }
+ } else {
+ /* Event fits in the current buffer and we are not on a switch boundary.
+ * It's safe to write */
+ }
+ offset_end = offset_begin + size;
+
+ if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
+ /* The offset_end will fall at the very beginning of the next subbuffer.
+ */
+ end_switch_current = 1; /* For offset_begin */
+ }
+
+ } while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
+ != offset_old);
+
+ /* Push the reader if necessary */
+ do {
+ consumed_old = atomic_read(&ltt_buf->consumed);
+ /* If buffer is in overwrite mode, push the reader consumed count if
+ the write position has reached it and we are not at the first
+ iteration (don't push the reader farther than the writer).
+ This operation can be done concurrently by many writers in the
+ same buffer; the writer at the farthest write position (sub-buffer
+ index) in the buffer is the one that wins this loop. */
+ /* If the buffer is not in overwrite mode, pushing the reader only
+ happens if a sub-buffer is corrupted. */
+ if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
+ - SUBBUF_TRUNC(consumed_old, ltt_buf))
+ >= ltt_buf->alloc_size)
+ consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
+ else {
+ consumed_new = consumed_old;
+ break;
+ }
+ } while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
+ != consumed_old);
+
+ if(consumed_old != consumed_new) {
+ /* Reader pushed : we are the winner of the push, we can therefore
+ re-equilibrate reserve and commit. Atomic increment of the commit
+ count permits other writers to play around with this variable
+ before us. We keep track of corrupted_subbuffers even in overwrite mode :
+ we never want to write over an incompletely committed sub-buffer :
+ possible causes : the buffer size is too low compared to the unordered
+ data input, or there is a writer who died between the reserve and the
+ commit. */
+ if(reserve_commit_diff) {
+ /* We have to alter the sub-buffer commit count : a sub-buffer is
+ corrupted. We do not deliver it. */
+ atomic_add(reserve_commit_diff,
+ &ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
+ atomic_inc(&ltt_buf->corrupted_subbuffers);
+ }
+ }
+
+
+ if(end_switch_old) {
+ /* old subbuffer */
+ /* Concurrency safe because we are the last and only thread to alter this
+ sub-buffer. As long as it is not delivered and read, no other thread can
+ alter the offset, alter the reserve_count or call the
+ client_buffer_end_callback on this sub-buffer.
+ The only remaining threads could be the ones with pending commits. They
+ will have to do the delivery themselves.
+ Not concurrency safe in overwrite mode. We detect corrupted subbuffers
+ with commit and reserve counts. We keep a corrupted sub-buffers count
+ and push the readers across these sub-buffers.
+ Not concurrency safe if a writer is stalled in a subbuffer and
+ another writer switches in, finding out it's corrupted. The result will
+ be that the old (uncommitted) subbuffer will be declared corrupted, and
+ that the new subbuffer will be declared corrupted too because of the
+ commit count adjustment.
+ Note : offset_old should never be 0 here.*/
+ ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
+ SUBBUF_INDEX((offset_old-1), ltt_buf));
+ /* Setting this reserve_count will allow the sub-buffer to be delivered by
+ the last committer. */
+ reserve_count =
+ atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
+ &ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
+ if(reserve_count
+ == atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
+ ltt_buf)])) {
+ ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
+ NULL);
+ }
+ }
+
+ if(begin_switch) {
+ /* Enable signals : this is what guarantees that the reserve which did the
+ * sem_wait does in fact win the cmpxchg for the offset. We only call
+ * these system calls on buffer boundaries because of their performance
+ * cost. */
+ if(reserve_commit_diff == 0) {
+ ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+ if(ret) perror("LTT Error in pthread_sigmask\n");
+ }
+ /* New sub-buffer */
+ /* This code can be executed unordered : writers may already have written
+ to the sub-buffer before this code gets executed, caution. */
+ /* The commit makes sure that this code is executed before the deliver
+ of this sub-buffer */
+ ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
+ commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
+ &ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
+ /* Check if the written buffer has to be delivered */
+ if(commit_count
+ == atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
+ ltt_buf)])) {
+ ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
+ }
+ }
+
+ if(end_switch_current) {
+ /* current subbuffer */
+ /* Concurrency safe because we are the last and only thread to alter this
+ sub-buffer. As long as it is not delivered and read, no other thread can
+ alter the offset, alter the reserve_count or call the
+ client_buffer_end_callback on this sub-buffer.
+ The only remaining threads could be the ones with pending commits. They
+ will have to do the delivery themselves.
+ Not concurrency safe in overwrite mode. We detect corrupted subbuffers
+ with commit and reserve counts. We keep a corrupted sub-buffers count
+ and push the readers across these sub-buffers.
+ Not concurrency safe if a writer is stalled in a subbuffer and
+ another writer switches in, finding out it's corrupted. The result will
+ be that the old (uncommitted) subbuffer will be declared corrupted, and
+ that the new subbuffer will be declared corrupted too because of the
+ commit count adjustment. */
+ ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
+ SUBBUF_INDEX((offset_end-1), ltt_buf));
+ /* Setting this reserve_count will allow the sub-buffer to be delivered by
+ the last committer. */
+ reserve_count =
+ atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
+ &ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
+ if(reserve_count
+ == atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
+ ltt_buf)])) {
+ ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
+ }
+ }
+
+ *slot_size = size;
+
+ //BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
+ //BUG_ON(*slot_size != (offset_end - offset_begin));
+
+ return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf) + before_hdr_pad;
+}
+
+
+/* ltt_commit_slot
+ *
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ *
+ * Parameters:
+ *
+ * @buf : the buffer to commit to.
+ * @reserved : address of the end of the event header.
+ * @slot_size : size of the reserved slot.
+ *
+ */
+static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
+ struct ltt_buf *ltt_buf,
+ void *reserved,
+ unsigned int slot_size)
+{
+ unsigned int offset_end = reserved - ltt_buf->start;
+ int commit_count;
+
+ commit_count = atomic_add_return(slot_size,
+ &ltt_buf->commit_count[SUBBUF_INDEX(offset_end-1,
+ ltt_buf)]);
+
+ /* Check if all commits have been done */
+ if(commit_count ==
+ atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_end-1, ltt_buf)])) {
+ ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_end-1, ltt_buf), NULL);
+ }
+}
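+
+/* Typical fast-path pairing, as done by the generated logging functions
+ * (sketch, error handling elided) :
+ *
+ *   uint64_t tsc;
+ *   size_t slot_size = 0;
+ *   char *p = ltt_reserve_slot(trace, buf, size, &slot_size, &tsc);
+ *   if (p) {
+ *     p = ltt_write_event_header(trace, buf, p, fID, eID, size, tsc);
+ *     ... copy the serialized event payload at p ...
+ *     ltt_commit_slot(buf, p, slot_size);
+ *   }
+ */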
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif //LTT_TRACE_FAST
+#endif //LTT_TRACE
+#endif //_LTT_USERTRACE_FAST_H
--- /dev/null
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef __LTT_USERTRACE_PPC_H
+#define __LTT_USERTRACE_PPC_H
+
+#ifdef __powerpc64__
+#include <ltt/atomic-ppc64.h>
+#include <ltt/system-ppc64.h>
+#else
+#include <ltt/ppc_asm-ppc.h>
+#include <ltt/atomic-ppc.h>
+#include <ltt/system-ppc.h>
+#include <ltt/timex-ppc.h>
+#endif
+
+
+#endif /* __LTT_USERTRACE_PPC_H */
--- /dev/null
+/*****************************************************************************
+ * ltt-usertrace.h
+ *
+ * LTT userspace tracing header
+ *
+ * Mathieu Desnoyers, March 2006
+ */
+
+#ifndef _LTT_USERTRACE_H
+#define _LTT_USERTRACE_H
+
+#include <errno.h>
+#include <syscall.h>
+#include <string.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define inline inline __attribute__((always_inline))
+
+#if defined(__powerpc__) || defined(__powerpc64__)
+#ifdef __powerpc64__
+#include <ltt/atomic-ppc64.h>
+#include <ltt/system-ppc64.h>
+#include <asm/timex.h>
+#else
+#include <ltt/ppc_asm-ppc.h>
+#include <ltt/atomic-ppc.h>
+#include <ltt/system-ppc.h>
+#include <ltt/timex-ppc.h>
+#endif
+#elif defined(__x86_64__)
+#include <ltt/kernelutils-x86_64.h>
+#elif defined(__i386__)
+#include <ltt/kernelutils-i386.h>
+#elif defined(__arm__)
+#include <ltt/kernelutils-arm.h>
+#elif defined(__SH4__)
+#include <ltt/kernelutils-sh.h>
+#else
+#error "Unsupported architecture"
+#endif
+
+#ifndef min
+#define min(a,b) ((a)<(b)?(a):(b))
+#endif
+
+#ifdef __i386__
+#define __NR_ltt_trace_generic 328
+#define __NR_ltt_register_generic 329
+#undef NR_syscalls
+#define NR_syscalls 330
+#endif
+
+#ifdef __x86_64__
+#define __NR_ltt_trace_generic 286
+#define __NR_ltt_register_generic 287
+#undef NR_syscalls
+#define NR_syscalls 288
+#endif
+
+#ifdef __powerpc__
+#define __NR_ltt_trace_generic 309
+#define __NR_ltt_register_generic 310
+#undef NR_syscalls
+#define NR_syscalls 311
+#endif
+
+#ifdef __powerpc64__
+#define __NR_ltt_trace_generic 309
+#define __NR_ltt_register_generic 310
+#undef NR_syscalls
+#define NR_syscalls 311
+#endif
+
+#ifdef __arm__
+#define __NR_ltt_trace_generic 352
+#define __NR_ltt_register_generic 353
+#undef NR_syscalls
+#define NR_syscalls 354
+#endif
+
+//FIXME : setup for MIPS
+
+#ifndef _LIBC
+// Put in bits/syscall.h
+#define SYS_ltt_trace_generic __NR_ltt_trace_generic
+#define SYS_ltt_register_generic __NR_ltt_register_generic
+#endif
+
+#define FACNAME_LEN 32
+
+/* LTT userspace tracing is non blocking by default when buffers are full */
+#ifndef LTT_BLOCKING
+#define LTT_BLOCKING 0
+#endif //LTT_BLOCKING
+
+typedef unsigned int ltt_facility_t;
+
+struct user_facility_info {
+ char name[FACNAME_LEN];
+ uint32_t num_events;
+ uint32_t alignment;
+ uint32_t checksum;
+ uint32_t int_size;
+ uint32_t long_size;
+ uint32_t pointer_size;
+ uint32_t size_t_size;
+};
+#if 0
+static inline __attribute__((no_instrument_function))
+_syscall5(int, ltt_trace_generic, unsigned int, facility_id,
+ unsigned int, event_id, void *, data, size_t, data_size, int, blocking)
+static inline __attribute__((no_instrument_function))
+_syscall2(int, ltt_register_generic, unsigned int *, facility_id,
+ const struct user_facility_info *, info)
+#endif //0
+
+#define ltt_register_generic(...) syscall(__NR_ltt_register_generic, __VA_ARGS__)
+#define ltt_trace_generic(...) syscall(__NR_ltt_trace_generic, __VA_ARGS__)
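+
+/* Registration sketch (illustrative; the field values below are assumptions
+ * for documentation, not copied from a generated facility loader) :
+ *
+ *   struct user_facility_info info = {
+ *     .name = "user_generic",
+ *     .num_events = facility_user_generic_num_events,
+ *     .alignment = sizeof(void *),
+ *     .checksum = 0xB1865E44,
+ *     .int_size = sizeof(int), .long_size = sizeof(long),
+ *     .pointer_size = sizeof(void *), .size_t_size = sizeof(size_t),
+ *   };
+ *   ltt_register_generic(&ltt_facility_user_generic, &info);
+ */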
+
+static inline unsigned int __attribute__((no_instrument_function))
+ ltt_align(size_t align_drift, size_t size_of_type);
+
+#ifndef LTT_PACK
+/* Calculate the offset needed to align the type */
+static inline unsigned int
+ ltt_align(size_t align_drift, size_t size_of_type)
+{
+ size_t alignment = min(sizeof(void*), size_of_type);
+
+ return ((alignment - align_drift) & (alignment-1));
+}
+#define LTT_ALIGN
+#else
+static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
+{
+ return 0;
+}
+#define LTT_ALIGN __attribute__((packed))
+#endif //LTT_PACK
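+
+/* Worked example (LTT_PACK undefined, 64-bit) : ltt_align(5, 4) uses
+ * min(8, 4) == 4-byte alignment and returns (4 - 5) & 3 == 3, the padding
+ * needed to bring a drift of 5 bytes to the next 4-byte boundary. */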
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#ifdef LTT_TRACE_FAST
+#include <ltt/ltt-usertrace-fast.h>
+#endif //LTT_TRACE_FAST
+
+#endif //_LTT_USERTRACE_H
--- /dev/null
+/*
+ * include/asm-ppc/ppc_asm.h
+ *
+ * Definitions used by various bits of low-level assembly code on PowerPC.
+ *
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC_ASM_PPC_H
+#define _PPC_ASM_PPC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
+#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
+#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
+ SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
+ REST_10GPRS(22, base)
+
+#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
+#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base)
+#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
+#define SAVE_2VR(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VR(n,b,base) SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base)
+#define SAVE_8VR(n,b,base) SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base)
+#define SAVE_16VR(n,b,base) SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base)
+#define SAVE_32VR(n,b,base) SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base)
+#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
+#define REST_2VR(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VR(n,b,base) REST_2VR(n,b,base); REST_2VR(n+2,b,base)
+#define REST_8VR(n,b,base) REST_4VR(n,b,base); REST_4VR(n+4,b,base)
+#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base)
+#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base)
+
+#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
+#define SAVE_2EVR(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
+#define SAVE_4EVR(n,s,base) SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base)
+#define SAVE_8EVR(n,s,base) SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base)
+#define SAVE_16EVR(n,s,base) SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base)
+#define SAVE_32EVR(n,s,base) SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base)
+
+#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
+#define REST_2EVR(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
+#define REST_4EVR(n,s,base) REST_2EVR(n,s,base); REST_2EVR(n+2,s,base)
+#define REST_8EVR(n,s,base) REST_4EVR(n,s,base); REST_4EVR(n+4,s,base)
+#define REST_16EVR(n,s,base) REST_8EVR(n,s,base); REST_8EVR(n+8,s,base)
+#define REST_32EVR(n,s,base) REST_16EVR(n,s,base); REST_16EVR(n+16,s,base)
+
+#ifdef CONFIG_PPC601_SYNC_FIX
+#define SYNC \
+BEGIN_FTR_SECTION \
+ sync; \
+ isync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define SYNC_601 \
+BEGIN_FTR_SECTION \
+ sync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define ISYNC_601 \
+BEGIN_FTR_SECTION \
+ isync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#else
+#define SYNC
+#define SYNC_601
+#define ISYNC_601
+#endif
+
+#ifndef CONFIG_SMP
+#define TLBSYNC
+#else /* CONFIG_SMP */
+/* tlbsync is not implemented on 601 */
+#define TLBSYNC \
+BEGIN_FTR_SECTION \
+ tlbsync; \
+ sync; \
+END_FTR_SECTION_IFCLR(CPU_FTR_601)
+#endif
+
+/*
+ * This instruction is not implemented on the PPC 603 or 601; however, on
+ * the 403GCX and 405GP tlbia IS defined and tlbie is not.
+ * All of these instructions exist in the 8xx, they have magical powers,
+ * and they must be used.
+ */
+
+#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
+#define tlbia \
+ li r4,1024; \
+ mtctr r4; \
+ lis r4,KERNELBASE@h; \
+0: tlbie r4; \
+ addi r4,r4,0x1000; \
+ bdnz 0b
+#endif
+
+#ifdef CONFIG_BOOKE
+#define tophys(rd,rs) \
+ addis rd,rs,0
+
+#define tovirt(rd,rs) \
+ addis rd,rs,0
+
+#else /* CONFIG_BOOKE */
+/*
+ * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
+ * physical base address of RAM at compile time.
+ */
+#define tophys(rd,rs) \
+0: addis rd,rs,-KERNELBASE@h; \
+ .section ".vtop_fixup","aw"; \
+ .align 1; \
+ .long 0b; \
+ .previous
+
+#define tovirt(rd,rs) \
+0: addis rd,rs,KERNELBASE@h; \
+ .section ".ptov_fixup","aw"; \
+ .align 1; \
+ .long 0b; \
+ .previous
+#endif /* CONFIG_BOOKE */
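+
+/*
+ * Illustrative expansion : on non-BookE kernels, tophys(r8, r3) assembles
+ * to "addis r8,r3,-KERNELBASE@h" and records the instruction's address
+ * (the 0: label) in the .vtop_fixup section, so the offset can be patched
+ * at boot when the physical base of RAM is only known at run time
+ * (e.g. on APUS).
+ */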
+
+/*
+ * On 64-bit cpus, we use the rfid instruction instead of rfi, but
+ * we then have to make sure we preserve the top 32 bits except for
+ * the 64-bit mode bit, which we clear.
+ */
+#ifdef CONFIG_PPC64BRIDGE
+#define FIX_SRR1(ra, rb) \
+ mr rb,ra; \
+ mfmsr ra; \
+ clrldi ra,ra,1; /* turn off 64-bit mode */ \
+ rldimi ra,rb,0,32
+#define RFI .long 0x4c000024 /* rfid instruction */
+#define MTMSRD(r) .long (0x7c000164 + ((r) << 21)) /* mtmsrd */
+#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
+
+#else
+#define FIX_SRR1(ra, rb)
+#ifndef CONFIG_40x
+#define RFI rfi
+#else
+#define RFI rfi; b . /* Prevent prefetch past rfi */
+#endif
+#define MTMSRD(r) mtmsr r
+#define CLR_TOP32(r)
+#endif /* CONFIG_PPC64BRIDGE */
+
+#define RFCI .long 0x4c000066 /* rfci instruction */
+#define RFDI .long 0x4c00004e /* rfdi instruction */
+#define RFMCI .long 0x4c00004c /* rfmci instruction */
+
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb) dcbt ra, rb;
+#define PPC405_ERR77_SYNC sync;
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+
+/* The boring bits... */
+
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/* General Purpose Registers (GPRs) */
+
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
+
+#define evr0 0
+#define evr1 1
+#define evr2 2
+#define evr3 3
+#define evr4 4
+#define evr5 5
+#define evr6 6
+#define evr7 7
+#define evr8 8
+#define evr9 9
+#define evr10 10
+#define evr11 11
+#define evr12 12
+#define evr13 13
+#define evr14 14
+#define evr15 15
+#define evr16 16
+#define evr17 17
+#define evr18 18
+#define evr19 19
+#define evr20 20
+#define evr21 21
+#define evr22 22
+#define evr23 23
+#define evr24 24
+#define evr25 25
+#define evr26 26
+#define evr27 27
+#define evr28 28
+#define evr29 29
+#define evr30 30
+#define evr31 31
+
+/* some stab codes */
+#define N_FUN 36
+#define N_RSYM 64
+#define N_SLINE 68
+#define N_SO 100
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif //_PPC_ASM_PPC_H
--- /dev/null
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef __PPC_SYSTEM_H
+#define __PPC_SYSTEM_H
+
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * We can use the eieio instruction for wmb, but since it doesn't
+ * give any ordering guarantees about loads, we have to use the
+ * stronger but slower sync instruction for mb and rmb.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
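+
+/*
+ * Usage sketch (illustrative; shared_data and flag are hypothetical) :
+ * a producer makes its data visible before setting a flag, and a
+ * consumer orders the matching loads :
+ *
+ *   shared_data = compute();  wmb();  flag = 1;    // producer
+ *   while (!flag) ;  rmb();  use(shared_data);     // consumer
+ */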
+
+static inline unsigned long
+xchg_u32(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ ("\n\
+1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+ switch (size) {
+ case 4:
+ return (unsigned long) xchg_u32(ptr, x);
+#if 0 /* xchg_u64 doesn't exist on 32-bit PPC */
+ case 8:
+ return (unsigned long) xchg_u64(ptr, x);
+#endif /* 0 */
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+static inline void * xchg_ptr(void * m, void * val)
+{
+ return (void *) xchg_u32(m, (unsigned long) val);
+}
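+
+/*
+ * Illustrative use (head and new_head are hypothetical names) :
+ * atomically publish a new list head and retrieve the previous one :
+ *
+ *   struct node *old = xchg_ptr((void *)&head, new_head);
+ */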
+
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ ("\n\
+1: lwarx %0,0,%2 \n\
+ cmpw 0,%0,%3 \n\
+ bne 2f \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2 \n\
+ bne- 1b\n"
+#if 0 //only using one CPU at a time (LTT) // def CONFIG_SMP
+" sync\n"
+#endif /* CONFIG_SMP */
+"2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+#if 0 /* we don't have __cmpxchg_u64 on 32-bit PPC */
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+#endif /* 0 */
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
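+
+/*
+ * Usage sketch (illustrative; counter is a hypothetical unsigned int) :
+ * a lock-free increment built on cmpxchg(). The update is retried until
+ * no other writer slipped in between the read and the store :
+ *
+ *   unsigned int old;
+ *   do {
+ *     old = counter;
+ *   } while (cmpxchg(&counter, old, old + 1) != old);
+ */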
+
+#define arch_align_stack(x) (x)
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* __PPC_SYSTEM_H */
--- /dev/null
+#ifndef __PPC64_SYSTEM_H
+#define __PPC64_SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+//#include <linux/config.h>
+//#include <linux/compiler.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/hw_irq.h>
+#include <asm/memory.h>
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores. Lwsync is fine for
+ * rmb(), though.
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight eieio barrier on
+ * SMP since it is only used to order updates to system memory.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() __asm__ __volatile__("": : :"memory")
+#define smp_rmb() __asm__ __volatile__("": : :"memory")
+#define smp_wmb() __asm__ __volatile__("": : :"memory")
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ *
+ * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
+ * is more like most of the other architectures.
+ */
+static inline unsigned long
+__xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%3 # __xchg_u32\n\
+ stwcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+static inline unsigned long
+__xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # __xchg_u64\n\
+ stdcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__xchg(volatile void *ptr, unsigned long x, int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32(ptr, x);
+ case 8:
+ return __xchg_u64(ptr, x);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#define xchg(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+#define tas(ptr) (xchg((ptr),1))
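+
+/*
+ * Usage sketch (illustrative; lock is a hypothetical int) : a naive
+ * test-and-set spinlock built on tas(). tas() returns the previous
+ * value, so the loop spins until it observes the 0 -> 1 transition :
+ *
+ *   static volatile int lock = 0;
+ *   while (tas(&lock))       // acquire
+ *     ;
+ *   // ... critical section ...
+ *   lock = 0;                // release (a real lock would add a barrier)
+ */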
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n\
+ stwcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
+
+#define arch_align_stack(x) (x)
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif
--- /dev/null
+#ifndef __TIMEX_PPC_H
+#define __TIMEX_PPC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#define CPU_FTR_601 0x00000100
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+
+typedef uint64_t cycles_t;
+
+/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
+static inline unsigned long get_tbl(void)
+{
+ unsigned long tbl;
+
+//#if defined(CONFIG_403GCX)
+// asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
+//#else
+ asm volatile("mftb %0" : "=r" (tbl));
+//#endif
+ return tbl;
+}
+
+static inline unsigned int get_tbu(void)
+{
+ unsigned int tbu;
+
+//#if defined(CONFIG_403GCX)
+// asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
+//#else
+ asm volatile("mftbu %0" : "=r" (tbu));
+//#endif
+ return tbu;
+}
+
+static inline uint64_t get_tb(void)
+{
+ unsigned int tbhi, tblo, tbhi2;
+
+ do {
+ tbhi = get_tbu();
+ tblo = get_tbl();
+ tbhi2 = get_tbu();
+ } while (tbhi != tbhi2);
+
+ return ((uint64_t)tbhi << 32) | tblo;
+}
+
+static inline cycles_t get_cycles(void)
+{
+ return get_tb();
+}
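+
+/*
+ * Usage sketch (illustrative) : converting a cycle delta to nanoseconds.
+ * tb_freq is a hypothetical timebase frequency in Hz (on Linux it comes
+ * from the device tree, e.g. the "timebase-frequency" property) :
+ *
+ *   cycles_t t0 = get_cycles();
+ *   // ... work ...
+ *   uint64_t ns = (get_cycles() - t0) * 1000000000ULL / tb_freq;
+ */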
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif //__TIMEX_PPC_H
--- /dev/null
+
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+
+
+int main(int argc, char **argv)
+{
+ printf("Will trace the following string : \"Hello world! Have a nice day.\"\n");
+ printf("every microsecond.\n");
+ printf("Abort with CTRL-C.\n");
+ printf("No file is created with this example : it logs through a kernel\n");
+ printf("system call. See the LTTng lttctl command to start tracing.\n");
+
+ while(1) {
+ trace_user_generic_string("Hello world! Have a nice day.");
+ usleep(1);
+ }
+
+ return 0;
+}
+
--- /dev/null
+
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_TRACE_FAST
+#include <ltt/ltt-facility-user_generic.h>
+
+
+int main(int argc, char **argv)
+{
+ printf("Will trace the following string : Running fast! in an infinite loop.\n");
+ printf("Abort with CTRL-C or it will quickly fill up your disk.\n");
+ printf("See the result file in /tmp/ltt-usertrace.\n");
+
+ while(1) {
+ trace_user_generic_string("Running fast!");
+ }
+
+ return 0;
+}
+
--- /dev/null
+
+
+#include <stdio.h>
+#include <unistd.h>
+
+void test_function(void)
+{
+ printf("we are in a test function\n");
+}
+
+
+int main(int argc, char **argv)
+{
+ printf("Abort with CTRL-C.\n");
+ printf("See the result file in /tmp/ltt-usertrace.\n");
+
+
+ while(1) {
+ test_function();
+ sleep(1);
+ }
+
+ return 0;
+}
+
--- /dev/null
+
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+#include <ltt/ltt-facility-custom-user_generic.h>
+// Notice the inclusion of ltt-facility-custom-user_generic.h for the
+// slow_printf support
+
+
+int main(int argc, char **argv)
+{
+ printf("Will trace a printf of an incrementing counter.\n");
+ printf("Abort with CTRL-C.\n");
+ printf("No file is created with this example : it logs through a kernel\n");
+ printf("system call. See the LTTng lttctl command to start tracing.\n");
+
+ unsigned int count = 0;
+
+ while(1) {
+ trace_user_generic_slow_printf("in: %s at: %s:%d: Counter value is: %u.",
+ __FILE__, __func__, __LINE__, count);
+ count++;
+ sleep(1);
+ }
+
+ return 0;
+}
+
--- /dev/null
+
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+
+
+int main(int argc, char **argv)
+{
+ printf("Will create a branded thread\n");
+ trace_user_generic_thread_brand("Sample_brand");
+
+ sleep(2);
+
+ return 0;
+}
+
--- /dev/null
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#define LTT_TRACE
+#define LTT_TRACE_FAST
+#include <ltt/ltt-facility-user_generic.h>
+
+
+void *thr1(void *arg)
+{
+ int i;
+ ltt_thread_init(); /* This init is not required : it will be done
+ automatically anyway at the first tracing call site */
+ printf("thread 1, thread id : %lu, pid %lu\n", pthread_self(), getpid());
+
+ for(i=0; i<100000; i++) {
+ trace_user_generic_string("Hello world! Have a nice day.");
+ }
+ pthread_exit((void*)1);
+}
+
+
+/* Example of a _bad_ thread, which still works with the tracing */
+void *thr2(void *arg)
+{
+ int i;
+ /* See ? no init */
+ printf("thread 2, thread id : %lu, pid %lu\n", pthread_self(), getpid());
+
+ for(i=0; i<100000; i++) {
+ trace_user_generic_string("Hello world! Have a nice day.");
+ }
+ /* This thread is a bad citizen : returning like this will cause its cancel
+ * routines not to be executed. This is still detected by the tracer, but only
+ * when the complete process dies. This is not recommended if you create a
+ * huge number of threads */
+ return ((void*)2);
+}
+
+
+int main()
+{
+ int err;
+ pthread_t tid1, tid2;
+ void *tret;
+
+ printf("Will trace the following string : Hello world! Have a nice day.\n");
+ printf("It will stop automatically.\n");
+ printf("See the result file in /tmp/ltt-usertrace.\n");
+
+ printf("thread main, thread id : %lu, pid %lu\n", pthread_self(), getpid());
+ err = pthread_create(&tid1, NULL, thr1, NULL);
+ if(err!=0) exit(1);
+
+ err = pthread_create(&tid2, NULL, thr2, NULL);
+ if(err!=0) exit(1);
+
+ err = pthread_join(tid1, &tret);
+ if(err!= 0) exit(1);
+
+ err = pthread_join(tid2, &tret);
+ if(err!= 0) exit(1);
+
+ return 0;
+}
--- /dev/null
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#define LTT_TRACE
+//this one is a non-blocking sample (no #define LTT_BLOCKING 1)
+#include <ltt/ltt-facility-user_generic.h>
+
+
+void *thr1(void *arg)
+{
+ printf("thread 1, thread id : %lu, pid %lu\n", pthread_self(), getpid());
+
+ while(1) {
+ trace_user_generic_string("Hello world! Have a nice day.");
+ sleep(2);
+ }
+ pthread_exit((void*)1);
+}
+
+
+/* Example of a _bad_ thread, which still works with the tracing */
+void *thr2(void *arg)
+{
+ printf("thread 2, thread id : %lu, pid %lu\n", pthread_self(), getpid());
+ sleep(1);
+ while(1) {
+ trace_user_generic_string("Hello world! Have a nice day.");
+ sleep(2);
+ }
+ return ((void*)2);
+}
+
+
+int main()
+{
+ int err;
+ pthread_t tid1, tid2;
+ void *tret;
+
+ printf("Will trace the following string : Hello world! Have a nice day.\n");
+ printf("Press CTRL-C to stop.\n");
+ printf("No file is created with this example : it logs through a kernel\n");
+ printf("system call. See the LTTng lttctl command to start tracing.\n\n");
+
+ printf("thread main, thread id : %lu, pid %lu\n", pthread_self(), getpid());
+ err = pthread_create(&tid1, NULL, thr1, NULL);
+ if(err!=0) exit(1);
+
+ err = pthread_create(&tid2, NULL, thr2, NULL);
+ if(err!=0) exit(1);
+
+ err = pthread_join(tid1, &tret);
+ if(err!= 0) exit(1);
+
+ err = pthread_join(tid2, &tret);
+ if(err!= 0) exit(1);
+
+ return 0;
+}
--- /dev/null
+
+#include <stdio.h>
+#include <unistd.h>
+
+#define LTT_TRACE
+#define LTT_BLOCKING 1
+#include <ltt/ltt-facility-user_generic.h>
+
+
+int main(int argc, char **argv)
+{
+ printf("Will trace the following string : \"Hello world! Have a nice day.\"\n");
+ printf("every second.\n");
+ printf("Abort with CTRL-C.\n");
+ printf("No file is created with this example : it logs through a kernel\n");
+ printf("system call. See the LTTng lttctl command to start tracing.\n");
+
+ while(1) {
+ trace_user_generic_string("Hello world! Have a nice day.");
+ sleep(1);
+ }
+
+ return 0;
+}
+
--- /dev/null
+#
+# Spec file for LTT Usertrace
+#
+Summary: Linux Trace Toolkit Userspace Tracing Package
+Name: ltt-usertrace
+Version: 0.13
+License: GPL
+Release: 1
+Group: Applications/Development
+Source: http://ltt.polymtl.ca/packages/%{name}-%{version}.tar.gz
+URL: http://ltt.polymtl.ca
+Packager: Martin Bisson <bissonm@discreet.com>
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+
+# Where do we install the libs
+%ifarch x86_64 ppc64 ppc64iseries ia64
+%define libdir /usr/lib64
+%else
+%define libdir /usr/lib
+%endif
+
+
+%description
+This package makes it possible to do userspace tracing with the Linux
+Trace Toolkit.
+
+%prep
+%setup -q
+
+%build
+make libs
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT $RPM_BUILD_ROOT/usr/include $RPM_BUILD_ROOT/%{libdir}
+make INCLUDE_DIR=$RPM_BUILD_ROOT/usr/include LIB_DIR=$RPM_BUILD_ROOT/%{libdir} install
+
+%post
+echo "Running ldconfig (might take a while)"
+ldconfig
+
+%postun
+echo "Running ldconfig (might take a while)"
+ldconfig
+
+%files
+/usr/include/ltt
+/usr/include/ltt/atomic-ppc.h
+/usr/include/ltt/atomic-ppc64.h
+/usr/include/ltt/kernelutils-x86_64.h
+/usr/include/ltt/kernelutils-i386.h
+/usr/include/ltt/ltt-facility-custom-user_generic.h
+/usr/include/ltt/ltt-facility-id-user_generic.h
+/usr/include/ltt/ltt-facility-user_generic.h
+/usr/include/ltt/ltt-usertrace-fast.h
+/usr/include/ltt/ltt-usertrace-ppc.h
+/usr/include/ltt/ltt-usertrace.h
+/usr/include/ltt/ppc_asm-ppc.h
+/usr/include/ltt/system-ppc.h
+/usr/include/ltt/system-ppc64.h
+/usr/include/ltt/timex-ppc.h
+%{libdir}/libltt-instrument-functions.a
+%{libdir}/libltt-instrument-functions.so
+%{libdir}/libltt-instrument-functions.so.0
+%{libdir}/libltt-loader-user_generic.a
+%{libdir}/libltt-loader-user_generic.so
+%{libdir}/libltt-loader-user_generic.so.0
+%{libdir}/libltt-usertrace-fast.a
+%{libdir}/libltt-usertrace-fast.so
+%{libdir}/libltt-usertrace-fast.so.0
+++ /dev/null
-
-CC=gcc
-INCLUDE_DIR?=/usr/include
-LIB_DIR?=/usr/lib
-RANLIB=ranlib
-
-LTT_CFLAGS=-I. -O2 -L. -fPIC
-# note : x86_64 needs -fPIC ? FIXME
-
-#For testing lib ltt-usertrace-fast
-#CFLAGS+=-DLTT_SUBBUF_SIZE_CPU=134217728
-#CFLAGS+=-DLTT_NULL_OUTPUT_TEST
-
-all: libs samples
-
-#SAMPLE PROGRAMS
-
-samples: sample sample-highspeed sample-printf \
- sample-instrument-fct sample-thread-slow sample-thread-fast sample-thread-brand sample-block
-
-sample: sample.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
-sample-block: sample-block.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
-
-sample-thread-brand: sample-thread-brand.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
-
-sample-highspeed: sample-highspeed.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-usertrace-fast -lltt-loader-user_generic -o $@ $^
-
-sample-printf: sample-printf.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lltt-loader-user_generic -o $@ $^
-
-sample-instrument-fct: sample-instrument-fct.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -g -finstrument-functions -lltt-instrument-functions -o $@ $^
-
-sample-thread-slow: sample-thread-slow.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -lltt-loader-user_generic -o $@ $^
-
-sample-thread-fast: sample-thread-fast.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -lltt-usertrace-fast -lltt-loader-user_generic -o $@ $^
-
-
-#LIBRAIRIES
-
-libs: libltt-instrument-functions.a libltt-instrument-functions.so.0 \
- libltt-usertrace-fast.a libltt-usertrace-fast.so.0 \
- libltt-loader-user_generic.a libltt-loader-user_generic.so.0
-
-libltt-usertrace-fast.a: ltt-usertrace-fast.o
- @rm -f libltt-usertrace-fast.a
- $(AR) rc $@ $^
- $(RANLIB) $@
-
-libltt-usertrace-fast.so.0: ltt-usertrace-fast.o
- @rm -f libltt-usertrace-fast.so libltt-usertrace-fast.so.0
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -shared -Wl,-soname,libltt-usertrace-fast.so -o $@ $^
- ln -s libltt-usertrace-fast.so.0 libltt-usertrace-fast.so
-
-libltt-instrument-functions.a: ltt-instrument-functions.o ltt-facility-loader-user_generic.o ltt-usertrace-fast.o
- @rm -f libltt-instrument-functions.a
- $(AR) rc $@ $^
- $(RANLIB) $@
-
-libltt-instrument-functions.so.0: ltt-instrument-functions.o ltt-facility-loader-user_generic.o ltt-usertrace-fast.o
- @rm -f libltt-instrument-functions.so libltt-instrument-functions.so.0
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -shared -Wl,-soname,libltt-instrument-functions.so -o $@ $^
- ln -s libltt-instrument-functions.so.0 libltt-instrument-functions.so
-
-libltt-loader-user_generic.a: ltt-facility-loader-user_generic.o
- @rm -f libltt-loader-user_generic.a
- $(AR) rc $@ $^
- $(RANLIB) $@
-
-libltt-loader-user_generic.so.0: ltt-facility-loader-user_generic.o
- @rm -f libltt-loader-user_generic.so libltt-loader-user_generic.so.0
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -lpthread -shared -Wl,-soname,libltt-loader-user_generic.so -o $@ $^
- ln -s libltt-loader-user_generic.so.0 libltt-loader-user_generic.so
-
-%.o: %.c
- $(CC) $(LTT_CFLAGS) $(CFLAGS) -c -o $@ $+
-
-.PHONY : clean install libs install_libs install_headers samples
-
-install_headers:
- if [ ! -e "$(INCLUDE_DIR)/ltt" ] ; then mkdir $(INCLUDE_DIR)/ltt ; fi
- cp -f ltt/*.h $(INCLUDE_DIR)/ltt
-
-install_libs:
- cp -df libltt-instrument-functions.so* libltt-instrument-functions.a $(LIB_DIR)
- cp -df libltt-usertrace-fast.so* libltt-usertrace-fast.a $(LIB_DIR)
- cp -df libltt-loader-user_generic.so* libltt-loader-user_generic.a $(LIB_DIR)
-
-install: install_headers libs install_libs
-
-clean:
- find . -name \*~ | xargs rm -fr *.o sample-thread sample sample-highspeed sample-printf sample-instrument-fct libltt-instrument-functions.so* libltt-instrument-functions.a libltt-usertrace-fast.a libltt-usertrace-fast.so* libltt-loader-user_generic.so* libltt-loader-user_generic.a sample-thread-slow sample-thread-fast sample-thread-brand sample-block java/*.class java/Sample.h java/TestBrand.h
+++ /dev/null
-
-LTTng usertrace package
-
-Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
-March 2006
-
-This package contains all the user space headers and C files necessary to make
-your applications and libraries trace through an active LTTng tracer. Here is
-a short quickstart guide.
-
-Here are the currently supported architectures :
-x86
-(please add the ltt_trace_generic and ltt_register_generic system calls to
-other architectures as you need them : it will work magically)
-
-* Compile your kernel with the latest LTTng patch. Make sure the option
- "Allow tracing from userspace" is _active_!
- See the QUICKSTART guide at http://ltt.polymtl.ca/ for details about how to
- setup a working tracer and viewer. See the genevent installation step : it is
- required for method #2 below.
-
-* Extract the latest ltt-usertrace archive :
-su
-cd /usr/src
-wget http://ltt.polymtl.ca/packages/ltt-usertrace-x.x.tar.gz
-gzip -cd ltt-usertrace-x.x.tar.gz | tar xvof -
-
-* Build the sample programs and install the headers and libraries into your
-system :
-(32 bits)
-su
-cd /usr/src/ltt-usertrace
-make clean
-make install (will build and install headers and libraries)
-make
-(64 bits)
-su
-cd /usr/src/ltt-usertrace
-make clean
-LIB_DIR=/usr/lib64 make install CFLAGS=-m64
-make CFLAGS=-m64
-
-Feel free to look at the sample programs and the Makefile : they demonstrate
-very well the features of the usertrace package and how to use them.
-
-* There are three ways to trace information from your application. The choice
- will principally depend on the trace data rate.
-
-1) Easy way, but slow (printf style)
- See sample-printf.c for code example.
-
-- Add the following statements to your program source (the define must come
- _before_ the includes!) :
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-#include <ltt/ltt-facility-custom-user_generic.h>
-
-Note the define of LTT_BLOCKING to 1 : if a trace buffer is full, your
-application will block. The default of this parameter is 0 (non blocking) :
-events are lost when the trace buffer is full. The choice is up to you.
-
-- Add something like the following sample line in your code. Note that this is a
- very standard format string, this is only a suggested presentation.
-
-trace_user_generic_slow_printf("in: %s at: %s:%d: Counter value is: %u.",
- __FILE__, __func__, __LINE__, count);
-
-- Compile your application with at least these parameters to gcc (the command
-  is split over two lines, joined by a "\") :
-gcc -D LTT_SHOW_DEBUG -I /usr/src/usertrace-generic -o myapp myapp.c \
- /usr/src/usertrace-generic/ltt-facility-loader-user_generic.c
-
-To see what the final result looks like :
-- Start tracing
-- Start your application
- ** You should see the following message when your program starts and the
- LTT_SHOW_DEBUG is defined :
- "LTT : ltt-facility-user_generic init in userspace"
- If you don't then you forgot to compile the facility loader in your
- application. If you find this output annoying, you can remove the
- "-D LTT_SHOW_DEBUG" gcc parameter, which will make the facility loader
- silent.
-- Stop tracing
-Then, to see only the user_generic events :
-lttv -m textDump -t /tmp/trace1 -e "event.facility=user_generic"
-
-It will show :
-user_generic.slow_printf: 35885.922829472 (/cpu_0), 15521, 7453, SYSCALL { "in: sample-printf.c at: main:18: Counter value is: 0." }
-user_generic.slow_printf: 35886.925685289 (/cpu_0), 15521, 7453, SYSCALL { "in: sample-printf.c at: main:18: Counter value is: 1." }
-...
-
-
-
-2) The second way to log events is still easy. The advantage is that it
- will make it easier to identify your data in the trace viewer afterward.
- Please read the comments in method 1) explained previously, as they
- are not repeated here.
- See sample.c and sample-thread-slow.c for code example.
-
-- Go to the ltt-usertrace directory
-su
-cd /usr/src/ltt-usertrace
-
-- Create your own facility (i.e. user_myfacility.xml).
- See the ones available in /usr/share/LinuxTraceToolkitViewer/facilities for
- examples.
-  Your facility _must_ be named following this standard : "user_*", where * is
-  whatever you like. If it is not, it will be rejected by the kernel with an
-  "Operation not permitted" error (visible with the -D LTT_SHOW_DEBUG
-  compilation parameter).
-
-user_myfacility.xml:
-
-<?xml version="1.0"?>
-<facility name="user_myfacility">
- <description>Sample facility</description>
- <event name="myevent">
- <description>Sample event</description>
- <field name="file"><string></field>
- <field name="function"><string></field>
- <field name="line"><int></field>
- <field name="firstval"><long></field>
- <field name="secondval"><pointer></field>
- </event>
-</facility>
-
-- AN IMPORTANT STEP FOLLOWS :
-  *copy* the user_myfacility.xml file into your system :
-su
-cp user_myfacility.xml /usr/share/LinuxTraceToolkitViewer/facilities
-
-- Use genevent to create the c code and headers :
-su
-cd /tmp
-mkdir genevent
-cd genevent
-for a in /usr/share/LinuxTraceToolkitViewer/facilities/user_*.xml;
- do /usr/local/bin/genevent $a;
-done
-cd /usr/src/usertrace-generic
-cp /tmp/genevent/*load* .
-cd ltt
-cp /tmp/genevent/ltt-facility-id-user_myfacility.h .
-cp /tmp/genevent/ltt-facility-user_myfacility.h .
-cd ..
-make install
-
-- Add the following statements to your program source (the define must come
- _before_ the includes!) :
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_myfacility.h>
-
-- Add a call following the trace_user_myfacility_myevent function found in
- /usr/include/ltt/ltt-facility-user_myfacility.h in your program.
-For instance :
-trace_user_myfacility_myevent(__FILE__, __func__, __LINE__, 1234, (void*)0xF0F0F0F0);
-
-- Compile your application with at least these parameters to gcc (the command
-  is split over two lines, joined by a "\") :
-gcc -I /usr/src/usertrace-generic -o myapp myapp.c \
- /usr/src/usertrace-generic/ltt-facility-loader-user_myfacility.c
-
-To see what the final result looks like :
-- Start tracing
-- Start your application
-- Stop tracing
-Then, to see only the user_myfacility events :
-lttv -m textDump -t /tmp/trace1 -e "event.facility=user_myfacility"
-
-It will show, for example :
-user_myfacility.myevent: 39507.805584526 (/cpu_1), 15829, 15736, SYSCALL { "myapp.c", "main", 8, 1234, 0xf0f0f0f0 }
-
-
-3) The third way to trace information from your application
-
-This method is clearly the _FASTEST_. It is principally I/O (disk and memory)
-bound. It will create a companion process for each of your program's threads,
-which will dump the tracing information into /tmp/ltt-usertrace.
-
-See sample-highspeed.c and sample-thread-fast.c for code example.
-
-- Add the following statements to your program source (the define must come
- _before_ the includes!) :
-
-#define LTT_TRACE
-#define LTT_TRACE_FAST
-#include <ltt/ltt-facility-user_myfacility.h>
-
-- Add a call following the trace_user_myfacility_myevent function found in
- /usr/include/ltt/ltt-facility-user_myfacility.h in your program.
-For instance :
-trace_user_myfacility_myevent(__FILE__, __func__, __LINE__, 1234, (void*)0xF0F0F0F0);
-
-- Compile your application with at least these parameters to gcc (the command
-  is split over two lines, joined by a "\") :
-gcc -lltt-usertrace-fast -I /usr/src/usertrace-generic -o myapp myapp.c \
- /usr/src/usertrace-generic/ltt-facility-loader-user_myfacility.c
-
-It requires one supplementary step when you take the trace :
-- Start tracing (with lttctl)
-- Start your application
-- Let your application run...
-- Stop tracing
-- Move or copy /tmp/ltt-usertrace into your trace.
-i.e., if your trace is in /tmp/trace1 :
-su
-mv /tmp/ltt-usertrace /tmp/trace1
-
-
-Then, to see only the user_myfacility events :
-lttv -m textDump -t /tmp/trace1 -e "event.facility=user_myfacility"
-
-It will show, for example :
-user_myfacility.myevent: 39507.805584526 (/ltt-usertrace/process-26174.26174.39236180500380_0), 15829, 15736, USER_MODE { "myapp.c", "main", 8, 1234, 0xf0f0f0f0 }
-
-
-
-* Fun feature : function instrumentation
-
-Here is how to generate a full trace of your program's function calls.
-See the sample-instrument-fct.c example program.
-
-- Compile your application with at least these parameters to gcc (the command
-  is split over two lines, joined by a "\") :
-gcc -g -finstrument-functions \
- -lltt-instrument-functions -o myapp myapp.c
-
-To see what the final result looks like :
-- Start tracing
-- Start your application
-- Stop tracing
-Then, to see only the function_entry and function_exit events :
-lttv -m textDump -t /tmp/trace1 -e "event.facility=user_generic & (event.name=function_entry | event.name=function_exit)"
-
-It will show, for example :
-user_generic.function_entry: 59329.709939111 (/ltt-usertrace/process-26202.0.39949996866578_0), 19250, 18581, USER_MODE { 0x8048454, 0x80484c2 }
-user_generic.function_exit: 59329.709944613 (/ltt-usertrace/process-26202.0.39949996866578_0), 19250, 18581, USER_MODE { 0x8048454, 0x80484c2 }
-
-you can then use (from the binutils package)
-addr2line -e sample-instrument-fct -i -f 0x8048454
-Which shows :
-test_function
-/usr/src/usertrace-generic/sample-instrument-fct.c:12
-
-The lookup in LTTV through libbfd has not been implemented yet.
-
-
-* Instrumentation of a java program
-
-See the java/ directory of this package. You will have to create a C library
-that holds the tracing functions, following the java-instrument-string.c. It has
-to be called from the Java code as shown in Sample.java.
-
-The generate.sh scripts compiles and executes the Java program with the JNI
-tracing library.
-
+++ /dev/null
-// The Sample.java file
-public class Sample
-{
- // Declaration of the Native (C) function
- private static native void trace_java_generic_string(String arg);
- static {
- System.loadLibrary("ltt-java-string");
- }
-
- public static void main(String[] args)
- {
- Sample.trace_java_generic_string("Tracing from java");
- }
-}
+++ /dev/null
-
-import ltt.*;
-
-// The TestBrand.java file
-public class TestBrand
-{
- public static void main(String[] args)
- {
- ltt.ThreadBrand.trace_java_generic_thread_brand("Brand_test");
- }
-}
+++ /dev/null
-
-package ltt;
-
-// The ThreadBrand.java file
-public class ThreadBrand
-{
- // Declaration of the Native (C) function
- public static native void trace_java_generic_thread_brand(String arg);
- static {
- System.loadLibrary("ltt-java-thread_brand");
- }
-}
+++ /dev/null
-#!/bin/sh
-
-export CLASSPATH=.:/usr/lib/jvm/java-1.5.0-sun-1.5.0.06/bin
-
-#Sample
-javac Sample.java
-javah -jni Sample
-gcc -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include \
- -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include/linux \
- -shared -Wl,-soname,libltt-java-string \
- -o libltt-java-string.so ltt-java-string.c \
- ../ltt-facility-loader-user_generic.c
-LD_LIBRARY_PATH=. java Sample
-
-#TestBrand
-echo javac ThreadBrand
-javac -d . ThreadBrand.java
-echo javah Threadbrand
-javah -jni ltt.ThreadBrand
-echo gcc
-gcc -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include \
- -I /usr/lib/jvm/java-1.5.0-sun-1.5.0.06/include/linux \
- -shared -Wl,-soname,libltt-java-thread_brand \
- -o libltt-java-thread_brand.so ltt-java-thread_brand.c \
- ../ltt-facility-loader-user_generic.c
-echo javac test
-javac TestBrand.java
-echo run
-LD_LIBRARY_PATH=. java TestBrand
+++ /dev/null
-
-#include <jni.h>
-#include "Sample.h"
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-
-JNIEXPORT void JNICALL Java_Sample_trace_1java_1generic_1string
- (JNIEnv *env, jobject obj, jstring jstr)
-{
- const char *str;
- str = (*env)->GetStringUTFChars(env, jstr, NULL);
- if (str == NULL) return; // out of memory error thrown
- trace_user_generic_string(str);
- (*env)->ReleaseStringUTFChars(env, jstr, str);
-}
-
+++ /dev/null
-
-#include <jni.h>
-#include "Sample.h"
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-
-JNIEXPORT void JNICALL Java_ltt_ThreadBrand_trace_1java_1generic_1thread_1brand
- (JNIEnv *env, jclass jc, jstring jstr)
-{
- const char *str;
- str = (*env)->GetStringUTFChars(env, jstr, NULL);
- if (str == NULL) return; // out of memory error thrown
- trace_user_generic_thread_brand(str);
- (*env)->ReleaseStringUTFChars(env, jstr, str);
-}
-
+++ /dev/null
-/*
- * ltt-facility-loader-user_generic.c
- *
- * (C) Copyright 2005 -
- * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
- *
- * Contains the LTT user space facility loader.
- *
- */
-
-
-#define LTT_TRACE
-#include <error.h>
-#include <stdio.h>
-#include <ltt/ltt-usertrace.h>
-#include "ltt-facility-loader-user_generic.h"
-
-static struct user_facility_info facility = {
- .name = LTT_FACILITY_NAME,
- .num_events = LTT_FACILITY_NUM_EVENTS,
-#ifndef LTT_PACK
- .alignment = LTT_FACILITY_ALIGNMENT?sizeof(void*):0,
-#else
- .alignment = 0,
-#endif //LTT_PACK
- .checksum = LTT_FACILITY_CHECKSUM,
- .int_size = sizeof(int),
- .long_size = sizeof(long),
- .pointer_size = sizeof(void*),
- .size_t_size = sizeof(size_t)
-};
-
-static void __attribute__((constructor)) __ltt_user_init(void)
-{
- int err;
-#ifdef LTT_SHOW_DEBUG
- printf("LTT : ltt-facility-user_generic init in userspace\n");
-#endif //LTT_SHOW_DEBUG
-
- err = ltt_register_generic(<T_FACILITY_SYMBOL, &facility);
- LTT_FACILITY_CHECKSUM_SYMBOL = LTT_FACILITY_SYMBOL;
-
- if (err) {
-#ifdef LTT_SHOW_DEBUG
- perror("Error in ltt_register_generic");
-#endif //LTT_SHOW_DEBUG
- }
-}
-
+++ /dev/null
-#ifndef _LTT_FACILITY_LOADER_USER_GENERIC_H_
-#define _LTT_FACILITY_LOADER_USER_GENERIC_H_
-
-#include <ltt/ltt-usertrace.h>
-#include <ltt/ltt-facility-id-user_generic.h>
-
-ltt_facility_t ltt_facility_user_generic;
-ltt_facility_t ltt_facility_user_generic_B1865E44;
-
-#define LTT_FACILITY_SYMBOL ltt_facility_user_generic
-#define LTT_FACILITY_CHECKSUM_SYMBOL ltt_facility_user_generic_B1865E44
-#define LTT_FACILITY_CHECKSUM 0xB1865E44
-#define LTT_FACILITY_NAME "user_generic"
-#define LTT_FACILITY_NUM_EVENTS facility_user_generic_num_events
-
-#define LTT_FACILITY_ALIGNMENT 1
-
-#endif //_LTT_FACILITY_LOADER_USER_GENERIC_H_
+++ /dev/null
-/****************************************************************************
- * ltt-instrument-functions.c
- *
- * Mathieu Desnoyers
- * March 2006
- */
-
-#define inline inline __attribute__((always_inline))
-
-#define LTT_TRACE
-#define LTT_TRACE_FAST
-#include <ltt/ltt-usertrace-fast.h>
-#include <ltt/ltt-facility-user_generic.h>
-
-void __attribute__((no_instrument_function)) __cyg_profile_func_enter (
- void *this_fn,
- void *call_site)
-{
- /* don't care about the return value */
- trace_user_generic_function_entry(this_fn, call_site);
-}
-
-void __attribute__((no_instrument_function)) __cyg_profile_func_exit (
- void *this_fn,
- void *call_site)
-{
- /* don't care about the return value */
- trace_user_generic_function_exit(this_fn, call_site);
-}
-
+++ /dev/null
-/* LTTng user-space "fast" library
- *
- * This daemon is spawned by each traced thread (to share the mmap).
- *
- * Its job is to dump periodically this buffer to disk (when it receives a
- * SIGUSR1 from its parent).
- *
- * It uses the control information in the shared memory area (producer/consumer
- * count).
- *
- * When the parent thread dies (yes, those things may happen) ;) , this daemon
- * will flush the last buffer and write it to disk.
- *
- * Supplement note for streaming : the daemon is responsible for flushing
- * periodically the buffer if it is streaming data.
- *
- *
- * Notes :
- * shm memory is typically limited to 4096 units (system wide limit SHMMNI in
- * /proc/sys/kernel/shmmni). As it requires computation time upon creation, we
- * do not use it : we will use a shared mmap() instead which is passed through
- * the fork().
- * MAP_SHARED mmap segment. Updated when msync or munmap are called.
- * MAP_ANONYMOUS.
- * Memory mapped by mmap() is preserved across fork(2), with the same
- * attributes.
- *
- * Eventually, there will be two modes :
- * * Slow thread spawn : a fork() is done for each new thread. If the process
- * dies, the data is not lost.
- * * Fast thread spawn : a pthread_create() is done by the application for each
- * new thread.
- *
- * We use a timer to check periodically if the parent died. I think it is less
- * intrusive than a ptrace() on the parent, which would get every signal. The
- * side effect of this is that we won't be notified if the parent does an
- * exec(). In this case, we will just sit there until the parent exits.
- *
- *
- * Copyright 2006 Mathieu Desnoyers
- *
- */
-
-#define inline inline __attribute__((always_inline))
-
-#define _GNU_SOURCE
-#define LTT_TRACE
-#define LTT_TRACE_FAST
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <signal.h>
-#include <syscall.h>
-#include <features.h>
-#include <pthread.h>
-#include <malloc.h>
-#include <string.h>
-#include <signal.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <sys/param.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <sys/syscall.h>
-
-#include <ltt/ltt-usertrace.h>
-
-#define gettid() syscall(__NR_gettid)
-
-#ifdef LTT_SHOW_DEBUG
-#define dbg_printf(...) printf(__VA_ARGS__)
-#else
-#define dbg_printf(...)
-#endif //LTT_SHOW_DEBUG
-
-
-enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
-
-/* Writer (the traced application) */
-
-__thread struct ltt_trace_info *thread_trace_info = NULL;
-
-void ltt_usertrace_fast_buffer_switch(void)
-{
- struct ltt_trace_info *tmp = thread_trace_info;
- if(tmp)
- kill(tmp->daemon_id, SIGUSR1);
-}
-
-/* The cleanup should never be called from a signal handler */
-static void ltt_usertrace_fast_cleanup(void *arg)
-{
- struct ltt_trace_info *tmp = thread_trace_info;
- if(tmp) {
- thread_trace_info = NULL;
- kill(tmp->daemon_id, SIGUSR2);
- munmap(tmp, sizeof(*tmp));
- }
-}
-
-/* Reader (the disk dumper daemon) */
-
-static pid_t traced_pid = 0;
-static pid_t traced_tid = 0;
-static int parent_exited = 0;
-static int fd_process = -1;
-static char outfile_name[PATH_MAX];
-static char identifier_name[PATH_MAX];
-
-/* signal handling */
-static void handler_sigusr1(int signo)
-{
- dbg_printf("LTT Signal %d received : parent buffer switch.\n", signo);
-}
-
-static void handler_sigusr2(int signo)
-{
- dbg_printf("LTT Signal %d received : parent exited.\n", signo);
- parent_exited = 1;
-}
-
-static void handler_sigalarm(int signo)
-{
- dbg_printf("LTT Signal %d received\n", signo);
-
- if(getppid() != traced_pid) {
- /* Parent died */
- dbg_printf("LTT Parent %lu died, cleaning up\n", traced_pid);
- traced_pid = 0;
- }
- alarm(3);
-}
-
-/* Do a buffer switch. Don't switch if buffer is completely empty */
-static void flush_buffer(struct ltt_buf *ltt_buf, enum force_switch_mode mode)
-{
- uint64_t tsc;
- int offset_begin, offset_end, offset_old;
- int reserve_commit_diff;
- int consumed_old, consumed_new;
- int commit_count, reserve_count;
- int end_switch_old;
-
- do {
- offset_old = atomic_read(<t_buf->offset);
- offset_begin = offset_old;
- end_switch_old = 0;
- tsc = ltt_get_timestamp();
- if(tsc == 0) {
- /* Error in getting the timestamp : should not happen : it would
- * mean we are called from an NMI during a write seqlock on xtime. */
- return;
- }
-
- if(SUBBUF_OFFSET(offset_begin, ltt_buf) != 0) {
- offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
- end_switch_old = 1;
- } else {
- /* we do not have to switch : buffer is empty */
- return;
- }
- if(mode == FORCE_ACTIVE)
- offset_begin += ltt_subbuf_header_len(ltt_buf);
- /* Always begin_switch in FORCE_ACTIVE mode */
-
- /* Test new buffer integrity */
- reserve_commit_diff =
- atomic_read(
- <t_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])
- - atomic_read(
- <t_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
- if(reserve_commit_diff == 0) {
- /* Next buffer not corrupted. */
- if(mode == FORCE_ACTIVE
- && (offset_begin-atomic_read(<t_buf->consumed))
- >= ltt_buf->alloc_size) {
- /* We do not overwrite non consumed buffers and we are full : ignore
- switch while tracing is active. */
- return;
- }
- } else {
- /* Next subbuffer corrupted. Force pushing reader even in normal mode */
- }
-
- offset_end = offset_begin;
- } while(atomic_cmpxchg(<t_buf->offset, offset_old, offset_end)
- != offset_old);
-
-
- if(mode == FORCE_ACTIVE) {
- /* Push the reader if necessary */
- do {
- consumed_old = atomic_read(<t_buf->consumed);
- /* If buffer is in overwrite mode, push the reader consumed count if
- the write position has reached it and we are not at the first
- iteration (don't push the reader farther than the writer).
- This operation can be done concurrently by many writers in the
-      same buffer, the writer being at the farthest write position sub-buffer
- index in the buffer being the one which will win this loop. */
- /* If the buffer is not in overwrite mode, pushing the reader only
- happen if a sub-buffer is corrupted */
- if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
- - SUBBUF_TRUNC(consumed_old, ltt_buf))
- >= ltt_buf->alloc_size)
- consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
- else {
- consumed_new = consumed_old;
- break;
- }
- } while(atomic_cmpxchg(<t_buf->consumed, consumed_old, consumed_new)
- != consumed_old);
-
- if(consumed_old != consumed_new) {
- /* Reader pushed : we are the winner of the push, we can therefore
- reequilibrate reserve and commit. Atomic increment of the commit
- count permits other writers to play around with this variable
- before us. We keep track of corrupted_subbuffers even in overwrite
- mode :
- we never want to write over a non completely committed sub-buffer :
- possible causes : the buffer size is too low compared to the unordered
- data input, or there is a writer who died between the reserve and the
- commit. */
- if(reserve_commit_diff) {
- /* We have to alter the sub-buffer commit count : a sub-buffer is
- corrupted */
- atomic_add(reserve_commit_diff,
- <t_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
- atomic_inc(<t_buf->corrupted_subbuffers);
- }
- }
- }
-
- /* Always switch */
-
- if(end_switch_old) {
- /* old subbuffer */
- /* Concurrency safe because we are the last and only thread to alter this
- sub-buffer. As long as it is not delivered and read, no other thread can
- alter the offset, alter the reserve_count or call the
- client_buffer_end_callback on this sub-buffer.
- The only remaining threads could be the ones with pending commits. They
-     will have to do the delivery themselves.
- Not concurrency safe in overwrite mode. We detect corrupted subbuffers with
- commit and reserve counts. We keep a corrupted sub-buffers count and push
- the readers across these sub-buffers.
- Not concurrency safe if a writer is stalled in a subbuffer and
- another writer switches in, finding out it's corrupted. The result will be
-     that the old (uncommitted) subbuffer will be declared corrupted, and that
- the new subbuffer will be declared corrupted too because of the commit
- count adjustment.
- Offset old should never be 0. */
- ltt_buffer_end_callback(ltt_buf, tsc, offset_old,
- SUBBUF_INDEX((offset_old), ltt_buf));
- /* Setting this reserve_count will allow the sub-buffer to be delivered by
- the last committer. */
- reserve_count = atomic_add_return((SUBBUF_OFFSET((offset_old-1),
- ltt_buf) + 1),
- <t_buf->reserve_count[SUBBUF_INDEX((offset_old),
- ltt_buf)]);
- if(reserve_count == atomic_read(
- <t_buf->commit_count[SUBBUF_INDEX((offset_old), ltt_buf)])) {
- ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old), ltt_buf), NULL);
- }
- }
-
- if(mode == FORCE_ACTIVE) {
- /* New sub-buffer */
- /* This code can be executed unordered : writers may already have written
- to the sub-buffer before this code gets executed, caution. */
- /* The commit makes sure that this code is executed before the deliver
- of this sub-buffer */
- ltt_buffer_begin_callback(ltt_buf, tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
- commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
- <t_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
- /* Check if the written buffer has to be delivered */
- if(commit_count == atomic_read(
- <t_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
- ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
- }
- }
-
-}
-
-
-static int open_output_files(void)
-{
- int ret;
- int fd;
- /* Open output files */
- umask(00000);
- ret = mkdir(LTT_USERTRACE_ROOT, 0777);
- if(ret < 0 && errno != EEXIST) {
- perror("LTT Error in creating output (mkdir)");
- exit(-1);
- }
- ret = chdir(LTT_USERTRACE_ROOT);
- if(ret < 0) {
- perror("LTT Error in creating output (chdir)");
- exit(-1);
- }
- snprintf(identifier_name, PATH_MAX-1, "%lu.%lu.%llu",
- traced_tid, traced_pid, get_cycles());
- snprintf(outfile_name, PATH_MAX-1, "process-%s", identifier_name);
-
-#ifndef LTT_NULL_OUTPUT_TEST
- fd = creat(outfile_name, 0644);
-#else
- /* NULL test */
- ret = symlink("/dev/null", outfile_name);
- if(ret < 0) {
- perror("error in symlink");
- exit(-1);
- }
- fd = open(outfile_name, O_WRONLY);
-  if(fd < 0) {
- perror("Error in open");
- exit(-1);
- }
-#endif //LTT_NULL_OUTPUT_TEST
- return fd;
-}
-
-static inline int ltt_buffer_get(struct ltt_buf *ltt_buf,
- unsigned int *offset)
-{
- unsigned int consumed_old, consumed_idx;
- consumed_old = atomic_read(<t_buf->consumed);
- consumed_idx = SUBBUF_INDEX(consumed_old, ltt_buf);
-
- if(atomic_read(<t_buf->commit_count[consumed_idx])
- != atomic_read(<t_buf->reserve_count[consumed_idx])) {
- return -EAGAIN;
- }
- if((SUBBUF_TRUNC(atomic_read(<t_buf->offset), ltt_buf)
- -SUBBUF_TRUNC(consumed_old, ltt_buf)) == 0) {
- return -EAGAIN;
- }
-
- *offset = consumed_old;
-
- return 0;
-}
-
-static inline int ltt_buffer_put(struct ltt_buf *ltt_buf,
- unsigned int offset)
-{
- unsigned int consumed_old, consumed_new;
- int ret;
-
- consumed_old = offset;
- consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
- if(atomic_cmpxchg(<t_buf->consumed, consumed_old, consumed_new)
- != consumed_old) {
- /* We have been pushed by the writer : the last buffer read _is_
- * corrupted!
- * It can also happen if this is a buffer we never got. */
- return -EIO;
- } else {
- if(traced_pid == 0 || parent_exited) return 0;
-
- ret = sem_post(<t_buf->writer_sem);
- if(ret < 0) {
- printf("error in sem_post");
- }
- }
- return ret;
-}
-
-static int read_subbuffer(struct ltt_buf *ltt_buf, int fd)
-{
- unsigned int consumed_old;
- int err;
- dbg_printf("LTT read buffer\n");
-
-
- err = ltt_buffer_get(ltt_buf, &consumed_old);
- if(err != 0) {
- if(err != -EAGAIN) dbg_printf("LTT Reserving sub buffer failed\n");
- goto get_error;
- }
- if(fd_process == -1) {
- fd_process = fd = open_output_files();
- }
-
- err = TEMP_FAILURE_RETRY(write(fd,
- ltt_buf->start
- + (consumed_old & ((ltt_buf->alloc_size)-1)),
- ltt_buf->subbuf_size));
-
- if(err < 0) {
- perror("Error in writing to file");
- goto write_error;
- }
-#if 0
- err = fsync(fd);
- if(err < 0) {
- perror("Error in writing to file");
- goto write_error;
- }
-#endif //0
-write_error:
- err = ltt_buffer_put(ltt_buf, consumed_old);
-
- if(err != 0) {
- if(err == -EIO) {
- dbg_printf("Reader has been pushed by the writer, last subbuffer corrupted.\n");
- /* FIXME : we may delete the last written buffer if we wish. */
- }
- goto get_error;
- }
-
-get_error:
- return err;
-}
-
-/* This function is called by ltt_rw_init which has signals blocked */
-static void ltt_usertrace_fast_daemon(struct ltt_trace_info *shared_trace_info,
- sigset_t oldset, pid_t l_traced_pid, pthread_t l_traced_tid)
-{
- struct sigaction act;
- int ret;
-
- traced_pid = l_traced_pid;
- traced_tid = l_traced_tid;
-
- dbg_printf("LTT ltt_usertrace_fast_daemon : init is %d, pid is %lu, traced_pid is %lu, traced_tid is %lu\n",
- shared_trace_info->init, getpid(), traced_pid, traced_tid);
-
- act.sa_handler = handler_sigusr1;
- act.sa_flags = 0;
- sigemptyset(&(act.sa_mask));
- sigaddset(&(act.sa_mask), SIGUSR1);
- sigaction(SIGUSR1, &act, NULL);
-
- act.sa_handler = handler_sigusr2;
- act.sa_flags = 0;
- sigemptyset(&(act.sa_mask));
- sigaddset(&(act.sa_mask), SIGUSR2);
- sigaction(SIGUSR2, &act, NULL);
-
- act.sa_handler = handler_sigalarm;
- act.sa_flags = 0;
- sigemptyset(&(act.sa_mask));
- sigaddset(&(act.sa_mask), SIGALRM);
- sigaction(SIGALRM, &act, NULL);
-
- alarm(3);
-
- while(1) {
- ret = sigsuspend(&oldset);
- if(ret != -1) {
- perror("LTT Error in sigsuspend\n");
- }
- if(traced_pid == 0) break; /* parent died */
- if(parent_exited) break;
- dbg_printf("LTT Doing a buffer switch read. pid is : %lu\n", getpid());
-
- do {
- ret = read_subbuffer(&shared_trace_info->channel.process, fd_process);
- } while(ret == 0);
- }
- /* The parent thread is dead and we have finished with the buffer */
-
- /* Buffer force switch (flush). Using FLUSH instead of ACTIVE because we know
- * there is no writer. */
- flush_buffer(&shared_trace_info->channel.process, FORCE_FLUSH);
- do {
- ret = read_subbuffer(&shared_trace_info->channel.process, fd_process);
- } while(ret == 0);
-
- if(fd_process != -1)
- close(fd_process);
-
- ret = sem_destroy(&shared_trace_info->channel.process.writer_sem);
- if(ret < 0) {
- perror("error in sem_destroy");
- }
- munmap(shared_trace_info, sizeof(*shared_trace_info));
-
- exit(0);
-}
-
-
-/* Reader-writer initialization */
-
-static enum ltt_process_role { LTT_ROLE_WRITER, LTT_ROLE_READER }
- role = LTT_ROLE_WRITER;
-
-
-void ltt_rw_init(void)
-{
- pid_t pid;
- struct ltt_trace_info *shared_trace_info;
- int ret;
- sigset_t set, oldset;
- pid_t l_traced_pid = getpid();
- pid_t l_traced_tid = gettid();
-
- /* parent : create the shared memory map */
- shared_trace_info = mmap(0, sizeof(*shared_trace_info),
- PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
- shared_trace_info->init=0;
- shared_trace_info->filter=0;
- shared_trace_info->daemon_id=0;
- shared_trace_info->nesting=0;
- memset(&shared_trace_info->channel.process, 0,
- sizeof(shared_trace_info->channel.process));
- //Need NPTL!
- ret = sem_init(&shared_trace_info->channel.process.writer_sem, 1,
- LTT_N_SUBBUFS);
- if(ret < 0) {
- perror("error in sem_init");
- }
- shared_trace_info->channel.process.alloc_size = LTT_BUF_SIZE_PROCESS;
- shared_trace_info->channel.process.subbuf_size = LTT_SUBBUF_SIZE_PROCESS;
- shared_trace_info->channel.process.start =
- shared_trace_info->channel.process_buf;
- ltt_buffer_begin_callback(&shared_trace_info->channel.process,
- ltt_get_timestamp(), 0);
-
- shared_trace_info->init = 1;
-
- /* Disable signals */
- ret = sigfillset(&set);
- if(ret) {
- dbg_printf("LTT Error in sigfillset\n");
- }
-
- ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
- if(ret) {
- dbg_printf("LTT Error in pthread_sigmask\n");
- }
-
- pid = fork();
- if(pid > 0) {
- /* Parent */
- shared_trace_info->daemon_id = pid;
- thread_trace_info = shared_trace_info;
-
- /* Enable signals */
- ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- if(ret) {
- dbg_printf("LTT Error in pthread_sigmask\n");
- }
- } else if(pid == 0) {
- pid_t sid;
- /* Child */
- role = LTT_ROLE_READER;
- sid = setsid();
- //Not a good idea to renice, unless futex wait eventually implements
- //priority inheritance.
- //ret = nice(1);
- //if(ret < 0) {
- // perror("Error in nice");
- //}
- if(sid < 0) {
- perror("Error setting sid");
- }
- ltt_usertrace_fast_daemon(shared_trace_info, oldset, l_traced_pid,
- l_traced_tid);
- /* Should never return */
- exit(-1);
- } else if(pid < 0) {
- /* fork error */
- perror("LTT Error in forking ltt-usertrace-fast");
- }
-}
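-/* Editor's sketch of the process model implemented above: the calling
- * process becomes the writer and forks a reader daemon; both share the
- * mmap()ed ltt_trace_info. The reader sleeps in sigsuspend(), wakes on
- * SIGUSR1/SIGALRM, drains full sub-buffers to disk in read_subbuffer(),
- * and hands each one back to the writer via sem_post() on writer_sem. */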
-
-static __thread struct _pthread_cleanup_buffer cleanup_buffer;
-
-void ltt_thread_init(void)
-{
- _pthread_cleanup_push(&cleanup_buffer, ltt_usertrace_fast_cleanup, NULL);
- ltt_rw_init();
-}
-
-void __attribute__((constructor)) __ltt_usertrace_fast_init(void)
-{
- dbg_printf("LTT usertrace-fast init\n");
-
- ltt_rw_init();
-}
-
-void __attribute__((destructor)) __ltt_usertrace_fast_fini(void)
-{
- if(role == LTT_ROLE_WRITER) {
- dbg_printf("LTT usertrace-fast fini\n");
- ltt_usertrace_fast_cleanup(NULL);
- }
-}
-
+++ /dev/null
-/*
- * PowerPC atomic operations
- */
-
-#ifndef _ASM_PPC_ATOMIC_H_
-#define _ASM_PPC_ATOMIC_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
-
-#if 0 // We only do operation on one CPU at a time (LTT)
-#define SMP_SYNC "sync"
-#define SMP_ISYNC "\n\tisync"
-#else
-#define SMP_SYNC ""
-#define SMP_ISYNC
-#endif
-
-/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
- * The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";"
-#else
-#define PPC405_ERR77(ra,rb)
-#endif
-
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%3 # atomic_add\n\
- add %0,%2,%0\n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_add_return\n\
- add %0,%1,%0\n"
- PPC405_ERR77(0,%2)
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- SMP_ISYNC
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%3 # atomic_sub\n\
- subf %0,%2,%0\n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_sub_return\n\
- subf %0,%1,%0\n"
- PPC405_ERR77(0,%2)
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- SMP_ISYNC
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-static __inline__ void atomic_inc(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc\n\
- addic %0,%0,1\n"
- PPC405_ERR77(0,%2)
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%1 # atomic_inc_return\n\
- addic %0,%0,1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1 \n\
- bne- 1b"
- SMP_ISYNC
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%2)\
-" stwcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%1 # atomic_dec_return\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b"
- SMP_ISYNC
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
- addic. %0,%0,-1\n\
- blt- 2f\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b"
- SMP_ISYNC
- "\n\
-2:" : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
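-/* Usage sketch (editor's addition, not part of the original header):
- * atomic_dec_if_positive() decrements only when the result stays >= 0,
- * which makes it a natural "try-acquire" on a counted resource pool. */
-#if 0 /* example only */
-static atomic_t slots = ATOMIC_INIT(4);
-
-static int try_acquire_slot(void)
-{
- /* Old value minus 1; a negative result means nothing was taken. */
- return atomic_dec_if_positive(&slots) >= 0;
-}
-#endif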
-
-#define __MB __asm__ __volatile__ (SMP_SYNC : : : "memory")
-#define smp_mb__before_atomic_dec() __MB
-#define smp_mb__after_atomic_dec() __MB
-#define smp_mb__before_atomic_inc() __MB
-#define smp_mb__after_atomic_inc() __MB
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif /* _ASM_PPC_ATOMIC_H_ */
+++ /dev/null
-/*
- * PowerPC64 atomic operations
- *
- * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_PPC64_ATOMIC_H_
-#define _ASM_PPC64_ATOMIC_H_
-
-#include <asm/memory.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%3 # atomic_add\n\
- add %0,%2,%0\n\
- stwcx. %0,0,%3\n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%2 # atomic_add_return\n\
- add %0,%1,%0\n\
- stwcx. %0,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%3 # atomic_sub\n\
- subf %0,%2,%0\n\
- stwcx. %0,0,%3\n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (a), "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%2 # atomic_sub_return\n\
- subf %0,%1,%0\n\
- stwcx. %0,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-static __inline__ void atomic_inc(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc\n\
- addic %0,%0,1\n\
- stwcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%1 # atomic_inc_return\n\
- addic %0,%0,1\n\
- stwcx. %0,0,%1\n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec\n\
- addic %0,%0,-1\n\
- stwcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "=m" (v->counter)
- : "r" (&v->counter), "m" (v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%1 # atomic_dec_return\n\
- addic %0,%0,-1\n\
- stwcx. %0,0,%1\n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
- addic. %0,%0,-1\n\
- blt- 2f\n\
- stwcx. %0,0,%1\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:" : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif /* _ASM_PPC64_ATOMIC_H_ */
+++ /dev/null
-/*****************************************************************************
- * kernelutils-arm.h
- *
- * This file holds the code needed by LTT usertrace that comes from the
- * kernel headers. Since including kernel headers is not recommended in
- * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
- * (i.e. copied/pasted) from the original kernel headers (2.6.18).
- *
- * Do not use these functions within signal handlers, as the architecture offers
- * no atomic operations. (Mathieu Desnoyers) It is safe to do multithreaded
- * tracing though, as the buffers are per thread.
- *
- * Deepak Saxena, October 2006
- */
-
-#ifndef _KERNELUTILS_ARM_H
-#define _KERNELUTILS_ARM_H
-
-#include <time.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define atomic_read(v) ((v)->counter)
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int val;
-
- /* Not atomic on this architecture; see the header comment above. */
- val = v->counter + i;
- v->counter = val;
-
- return val;
-}
-
-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-#define atomic_inc(v) (void) atomic_add_return(1, v)
-
-static inline unsigned long cmpxchg(volatile void *ptr,
- unsigned long old,
- unsigned long new)
-{
- unsigned long prev;
- volatile unsigned long *p = ptr;
-
- if ((prev = *p) == old)
- *p = new;
- return(prev);
-}
-
-static inline unsigned long long get_cycles(void)
-{
- struct timespec tp;
- clock_gettime(CLOCK_MONOTONIC, &tp);
- return (unsigned long long)tp.tv_sec * 1000000000ULL + tp.tv_nsec;
-}
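-/* Editor's note: on ARM this "cycle" count is really CLOCK_MONOTONIC
- * time in nanoseconds, and older glibc versions keep clock_gettime() in
- * librt (link with -lrt). A minimal sketch of timing a region with it: */
-#if 0 /* example only */
-static unsigned long long elapsed_ns(void (*fn)(void))
-{
- unsigned long long t0 = get_cycles();
- fn();
- return get_cycles() - t0;
-}
-#endif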
-
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif
+++ /dev/null
-/*****************************************************************************
- * kernelutils-i386.h
- *
- * This file holds the code needed by LTT usertrace that comes from the
- * kernel headers. Since including kernel headers is not recommended in
- * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
- * (i.e. copied/pasted) from the original kernel headers (2.6.17).
- *
- * Martin Bisson, July 2006
- * Mathieu Desnoyers, August 2006
- */
-
-#ifndef _KERNELUTILS_I386_H
-#define _KERNELUTILS_I386_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// We are careful, so we assume a possibly SMP machine
-#define LOCK "lock ; "
-#define LOCK_PREFIX "lock ; "
-
-
-// From atomic.h
-
-
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic_add_return - add and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- int __i = i;
- __asm__ __volatile__(
- LOCK "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
- return i + __i;
-}
-
-
-
-
-// From system.h
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long newval, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(newval), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(newval), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(newval), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
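-/* Usage sketch (editor's addition, not from the original header): the
- * cmpxchg() macro is typically wrapped in a retry loop to update a word
- * atomically without taking a lock. */
-#if 0 /* example only */
-static void atomic_or_flag(volatile unsigned long *word, unsigned long flag)
-{
- unsigned long old, val;
- do {
-  old = *word;
-  val = old | flag;
- } while (cmpxchg(word, old, val) != old);
-}
-#endif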
-
-
-// From msr.h
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-// From timex.h
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- unsigned long long ret;
-
- rdtscll(ret);
- return ret;
-}
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif // _KERNELUTILS_I386_H
+++ /dev/null
-/*****************************************************************************
- * kernelutils-x86_64.h
- *
- * This file holds the code needed by LTT usertrace that comes from the
- * kernel headers. Since including kernel headers is not recommended in
- * userspace programs/libraries, we rewrote implementations HIGHLY INSPIRED
- * (i.e. copied/pasted) from the original kernel headers (2.6.17).
- *
- * Martin Bisson, July 2006
- */
-
-#ifndef _KERNELUTILS_X86_64_H
-#define _KERNELUTILS_X86_64_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// We are careful, so we assume a possibly SMP machine
-#define LOCK "lock ; "
-#define LOCK_PREFIX "lock ; "
-
-
-
-
-// From atomic.h
-
-
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic_add_return - add and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- int __i = i;
- __asm__ __volatile__(
- LOCK "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
- return i + __i;
-}
-
-
-
-
-// From system.h
-
-#define __xg(x) ((volatile long *)(x))
-
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-
-
-
-// From msr.h
-
-
-#define rdtscll(val) do { \
- unsigned int __a,__d; \
- asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
- (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-} while(0)
-
-
-
-
-// From timex.h
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- unsigned long long ret;
-
- rdtscll(ret);
- return ret;
-}
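-/* Usage sketch (editor's addition): get_cycles() reads the TSC, so a
- * before/after subtraction gives a rough cycle count for a region of
- * code (modulo out-of-order execution and frequency scaling). */
-#if 0 /* example only */
-static cycles_t time_region(void (*fn)(void))
-{
- cycles_t start = get_cycles();
- fn();
- return get_cycles() - start;
-}
-#endif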
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif // _KERNELUTILS_X86_64_H
+++ /dev/null
-#ifndef _LTT_FACILITY_CUSTOM_USER_GENERIC_H_
-#define _LTT_FACILITY_CUSTOM_USER_GENERIC_H_
-
-#include <sys/types.h>
-#include <ltt/ltt-facility-id-user_generic.h>
-#include <ltt/ltt-usertrace.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-static int trace_user_generic_slow_printf(
- const char *fmt, ...)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- /* Guess we need no more than 100 bytes. */
- int n, size = 104;
- char *p, *np;
- va_list ap;
- int ret;
-
- size += ltt_align(size, sizeof(void*));
- if ((p = malloc (size)) == NULL)
- return -1;
-
- while (1) {
- /* Try to print in the allocated space. */
- va_start(ap, fmt);
- n = vsnprintf (p, size, fmt, ap);
- va_end(ap);
- /* If that worked, trace the string. */
- if (n > -1 && n < size) {
- ret = trace_user_generic_slow_printf_param_buffer(p, n+1+ltt_align(n+1, sizeof(void*)));
- free(p);
- return ret;
- }
- /* Else try again with more space. */
- if (n > -1) /* glibc 2.1 */
- size = n+1; /* precisely what is needed */
- else /* glibc 2.0 */
- size *= 2; /* twice the old size */
- size += ltt_align(size, sizeof(void*));
- if ((np = realloc (p, size)) == NULL) {
- free(p);
- return -1;
- } else {
- p = np;
- }
- }
-}
-#endif //LTT_TRACE
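-/* Usage sketch (editor's addition; fd and name are placeholder values):
- * with LTT_TRACE defined, the function above behaves like a printf()
- * whose formatted output becomes a single slow_printf trace event. */
-#if 0 /* example only */
-static void example_trace_open(int fd, const char *name)
-{
- trace_user_generic_slow_printf("opened fd %d for %s", fd, name);
-}
-#endif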
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif //_LTT_FACILITY_CUSTOM_USER_GENERIC_H_
+++ /dev/null
-#ifndef _LTT_FACILITY_ID_USER_GENERIC_H_
-#define _LTT_FACILITY_ID_USER_GENERIC_H_
-
-#ifdef LTT_TRACE
-#include <ltt/ltt-usertrace.h>
-
-/**** facility handle ****/
-
-extern ltt_facility_t ltt_facility_user_generic_B1865E44;
-extern ltt_facility_t ltt_facility_user_generic;
-
-
-/**** event index ****/
-
-enum user_generic_event {
- event_user_generic_string,
- event_user_generic_string_pointer,
- event_user_generic_slow_printf,
- event_user_generic_function_entry,
- event_user_generic_function_exit,
- event_user_generic_thread_brand,
- facility_user_generic_num_events
-};
-
-#endif //LTT_TRACE
-#endif //_LTT_FACILITY_ID_USER_GENERIC_H_
+++ /dev/null
-#ifndef _LTT_FACILITY_USER_GENERIC_H_
-#define _LTT_FACILITY_USER_GENERIC_H_
-
-#include <sys/types.h>
-#include <ltt/ltt-facility-id-user_generic.h>
-#include <ltt/ltt-usertrace.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Named types */
-
-/* Event string structures */
-static inline void lttng_write_string_user_generic_string_data(
- char *buffer,
- size_t *to_base,
- size_t *to,
- const char **from,
- size_t *len,
- const char * obj)
-{
- size_t size;
- size_t align;
-
- /* Flush pending memcpy */
- if (*len != 0) {
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, *from, *len);
- }
- *to += *len;
- *len = 0;
-
- align = sizeof(char);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- /* Contains variable sized fields : must explode the structure */
-
- size = strlen(obj) + 1; /* Include final NULL char. */
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, obj, size);
- *to += size;
-
- /* Realign the *to_base on arch size, set *to to 0 */
- *to += ltt_align(*to, sizeof(void *));
- *to_base = *to_base+*to;
- *to = 0;
-
- /* Put source *from just after the C string */
- *from += size;
-}
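-/* Editor's note on the serializer above: it is designed to be called
- * twice. A first pass with buffer == NULL only advances *to_base, *to and
- * *len, yielding the exact reserve_size; the second pass, with a real
- * buffer, performs the memcpy()s. Every logging function below follows
- * this size-then-write sequence. */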
-
-
-/* Event string logging function */
-static inline int trace_user_generic_string(
- const char * lttng_param_data);
-
-#ifndef LTT_TRACE_FAST
-static inline int trace_user_generic_string(
- const char * lttng_param_data)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- int ret = 0;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- const char *real_from;
- const char **from = &real_from;
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_data;
- lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
-
- reserve_size = *to_base + *to + *len;
- {
- char stack_buffer[reserve_size];
- buffer = stack_buffer;
-
- *to_base = *to = *len = 0;
-
- *from = (const char*)lttng_param_data;
- lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_string, buffer, reserve_size, LTT_BLOCKING, 0);
- }
-
- return ret;
-
-}
-#endif //LTT_TRACE
-#endif //!LTT_TRACE_FAST
-
-#ifdef LTT_TRACE_FAST
-static inline int trace_user_generic_string(
- const char * lttng_param_data)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- unsigned int index;
- struct ltt_trace_info *trace = thread_trace_info;
- struct ltt_buf *ltt_buf;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- const char *real_from;
- const char **from = &real_from;
- uint64_t tsc;
- if (!trace) {
- ltt_thread_init();
- trace = thread_trace_info;
- }
-
-
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_data;
- lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
-
- reserve_size = *to_base + *to + *len;
- trace->nesting++;
- index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
- event_user_generic_string);
-
- {
- ltt_buf = ltt_get_channel_from_index(trace, index);
- slot_size = 0;
- buffer = ltt_reserve_slot(trace, ltt_buf,
- reserve_size, &slot_size, &tsc);
- if (!buffer)
- goto end; /* buffer full */
-
- *to_base = *to = *len = 0;
-
- buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- ltt_facility_user_generic_B1865E44, event_user_generic_string,
- reserve_size, tsc);
- *from = (const char*)lttng_param_data;
- lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ltt_commit_slot(ltt_buf, buffer, slot_size);
-
-}
-
-end:
- trace->nesting--;
- return 0;
-}
-#endif //LTT_TRACE
-#endif //LTT_TRACE_FAST
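-/* Usage sketch (editor's addition): with LTT_TRACE defined (and
- * LTT_TRACE_FAST for the lock-less path), tracing a string is one call. */
-#if 0 /* example only */
-static void example_trace_startup(void)
-{
- trace_user_generic_string("application started");
-}
-#endif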
-
-/* Event string_pointer structures */
-static inline void lttng_write_string_user_generic_string_pointer_string(
- char *buffer,
- size_t *to_base,
- size_t *to,
- const char **from,
- size_t *len,
- const char * obj)
-{
- size_t size;
- size_t align;
-
- /* Flush pending memcpy */
- if (*len != 0) {
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, *from, *len);
- }
- *to += *len;
- *len = 0;
-
- align = sizeof(char);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- /* Contains variable sized fields : must explode the structure */
-
- size = strlen(obj) + 1; /* Include final NULL char. */
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, obj, size);
- *to += size;
-
- /* Realign the *to_base on arch size, set *to to 0 */
- *to += ltt_align(*to, sizeof(void *));
- *to_base = *to_base+*to;
- *to = 0;
-
- /* Put source *from just after the C string */
- *from += size;
-}
-
-
-/* Event string_pointer logging function */
-static inline int trace_user_generic_string_pointer(
- const char * lttng_param_string,
- const void * lttng_param_pointer);
-
-#ifndef LTT_TRACE_FAST
-static inline int trace_user_generic_string_pointer(
- const char * lttng_param_string,
- const void * lttng_param_pointer)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- int ret = 0;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- size_t align;
- const char *real_from;
- const char **from = &real_from;
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_string;
- lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
-
- *from = (const char*)&lttng_param_pointer;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- reserve_size = *to_base + *to + *len;
- {
- char stack_buffer[reserve_size];
- buffer = stack_buffer;
-
- *to_base = *to = *len = 0;
-
- *from = (const char*)lttng_param_string;
- lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- *from = (const char*)&lttng_param_pointer;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_string_pointer, buffer, reserve_size, LTT_BLOCKING, 0);
- }
-
- return ret;
-
-}
-#endif //LTT_TRACE
-#endif //!LTT_TRACE_FAST
-
-#ifdef LTT_TRACE_FAST
-static inline int trace_user_generic_string_pointer(
- const char * lttng_param_string,
- const void * lttng_param_pointer)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- unsigned int index;
- struct ltt_trace_info *trace = thread_trace_info;
- struct ltt_buf *ltt_buf;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- size_t align;
- const char *real_from;
- const char **from = &real_from;
- uint64_t tsc;
- if (!trace) {
- ltt_thread_init();
- trace = thread_trace_info;
- }
-
-
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_string;
- lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
-
- *from = (const char*)&lttng_param_pointer;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- reserve_size = *to_base + *to + *len;
- trace->nesting++;
- index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
- event_user_generic_string_pointer);
-
- {
- ltt_buf = ltt_get_channel_from_index(trace, index);
- slot_size = 0;
- buffer = ltt_reserve_slot(trace, ltt_buf,
- reserve_size, &slot_size, &tsc);
- if (!buffer)
- goto end; /* buffer full */
-
- *to_base = *to = *len = 0;
-
- buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- ltt_facility_user_generic_B1865E44, event_user_generic_string_pointer,
- reserve_size, tsc);
- *from = (const char*)lttng_param_string;
- lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- *from = (const char*)&lttng_param_pointer;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ltt_commit_slot(ltt_buf, buffer, slot_size);
-
-}
-
-end:
- trace->nesting--;
- return 0;
-}
-#endif //LTT_TRACE
-#endif //LTT_TRACE_FAST
-
-/* Event slow_printf structures */
-static inline void lttng_write_string_user_generic_slow_printf_string(
- char *buffer,
- size_t *to_base,
- size_t *to,
- const char **from,
- size_t *len,
- const char * obj)
-{
- size_t size;
- size_t align;
-
- /* Flush pending memcpy */
- if (*len != 0) {
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, *from, *len);
- }
- *to += *len;
- *len = 0;
-
- align = sizeof(char);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- /* Contains variable sized fields : must explode the structure */
-
- size = strlen(obj) + 1; /* Include final NULL char. */
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, obj, size);
- *to += size;
-
- /* Realign the *to_base on arch size, set *to to 0 */
- *to += ltt_align(*to, sizeof(void *));
- *to_base = *to_base+*to;
- *to = 0;
-
- /* Put source *from just after the C string */
- *from += size;
-}
-
-
-/* Event slow_printf logging function */
-static inline int trace_user_generic_slow_printf_param_buffer(
- char *buffer,
- size_t reserve_size);
-
-#ifndef LTT_TRACE_FAST
-static inline int trace_user_generic_slow_printf_param_buffer(
- char *buffer,
- size_t reserve_size)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- int ret = 0;
- {
- ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_slow_printf, buffer, reserve_size, LTT_BLOCKING, 0);
- }
-
- return ret;
-
-}
-#endif //LTT_TRACE
-#endif //!LTT_TRACE_FAST
-
-#ifdef LTT_TRACE_FAST
-static inline int trace_user_generic_slow_printf(
- const char * lttng_param_string)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- unsigned int index;
- struct ltt_trace_info *trace = thread_trace_info;
- struct ltt_buf *ltt_buf;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- const char *real_from;
- const char **from = &real_from;
- uint64_t tsc;
- if (!trace) {
- ltt_thread_init();
- trace = thread_trace_info;
- }
-
-
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_string;
- lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);
-
- reserve_size = *to_base + *to + *len;
- trace->nesting++;
- index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
- event_user_generic_slow_printf);
-
- {
- ltt_buf = ltt_get_channel_from_index(trace, index);
- slot_size = 0;
- buffer = ltt_reserve_slot(trace, ltt_buf,
- reserve_size, &slot_size, &tsc);
- if (!buffer)
- goto end; /* buffer full */
-
- *to_base = *to = *len = 0;
-
- buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- ltt_facility_user_generic_B1865E44, event_user_generic_slow_printf,
- reserve_size, tsc);
- *from = (const char*)lttng_param_string;
- lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ltt_commit_slot(ltt_buf, buffer, slot_size);
-
-}
-
-end:
- trace->nesting--;
- return 0;
-}
-#endif //LTT_TRACE
-#endif //LTT_TRACE_FAST
-
-/* Event function_entry structures */
-
-/* Event function_entry logging function */
-static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
- const void * lttng_param_this_fn,
- const void * lttng_param_call_site);
-
-#ifndef LTT_TRACE_FAST
-static inline int trace_user_generic_function_entry(
- const void * lttng_param_this_fn,
- const void * lttng_param_call_site)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- int ret = 0;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- size_t align;
- const char *real_from;
- const char **from = &real_from;
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- reserve_size = *to_base + *to + *len;
- {
- char stack_buffer[reserve_size];
- buffer = stack_buffer;
-
- *to_base = *to = *len = 0;
-
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_function_entry, buffer, reserve_size, LTT_BLOCKING, 0);
- }
-
- return ret;
-
-}
-#endif //LTT_TRACE
-#endif //!LTT_TRACE_FAST
-
-#ifdef LTT_TRACE_FAST
-static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
- const void * lttng_param_this_fn,
- const void * lttng_param_call_site)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- unsigned int index;
- struct ltt_trace_info *trace = thread_trace_info;
- struct ltt_buf *ltt_buf;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- size_t align;
- const char *real_from;
- const char **from = &real_from;
- uint64_t tsc;
- if (!trace) {
- ltt_thread_init();
- trace = thread_trace_info;
- }
-
-
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- reserve_size = *to_base + *to + *len;
- trace->nesting++;
- index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
- event_user_generic_function_entry);
-
- {
- ltt_buf = ltt_get_channel_from_index(trace, index);
- slot_size = 0;
- buffer = ltt_reserve_slot(trace, ltt_buf,
- reserve_size, &slot_size, &tsc);
- if (!buffer)
- goto end; /* buffer full */
-
- *to_base = *to = *len = 0;
-
- buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- ltt_facility_user_generic_B1865E44, event_user_generic_function_entry,
- reserve_size, tsc);
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ltt_commit_slot(ltt_buf, buffer, slot_size);
-
-}
-
-end:
- trace->nesting--;
- return 0;
-}
-#endif //LTT_TRACE
-#endif //LTT_TRACE_FAST
-
-/* Event function_exit structures */
-
-/* Event function_exit logging function */
-static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
- const void * lttng_param_this_fn,
- const void * lttng_param_call_site);
-
-#ifndef LTT_TRACE_FAST
-static inline int trace_user_generic_function_exit(
- const void * lttng_param_this_fn,
- const void * lttng_param_call_site)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- int ret = 0;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- size_t align;
- const char *real_from;
- const char **from = &real_from;
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- reserve_size = *to_base + *to + *len;
- {
- char stack_buffer[reserve_size];
- buffer = stack_buffer;
-
- *to_base = *to = *len = 0;
-
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_function_exit, buffer, reserve_size, LTT_BLOCKING, 0);
- }
-
- return ret;
-
-}
-#endif //LTT_TRACE
-#endif //!LTT_TRACE_FAST
-
-#ifdef LTT_TRACE_FAST
-static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
- const void * lttng_param_this_fn,
- const void * lttng_param_call_site)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- unsigned int index;
- struct ltt_trace_info *trace = thread_trace_info;
- struct ltt_buf *ltt_buf;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- size_t align;
- const char *real_from;
- const char **from = &real_from;
- uint64_t tsc;
- if (!trace) {
- ltt_thread_init();
- trace = thread_trace_info;
- }
-
-
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- reserve_size = *to_base + *to + *len;
- trace->nesting++;
- index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
- event_user_generic_function_exit);
-
- {
- ltt_buf = ltt_get_channel_from_index(trace, index);
- slot_size = 0;
- buffer = ltt_reserve_slot(trace, ltt_buf,
- reserve_size, &slot_size, &tsc);
- if (!buffer)
- goto end; /* buffer full */
-
- *to_base = *to = *len = 0;
-
- buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- ltt_facility_user_generic_B1865E44, event_user_generic_function_exit,
- reserve_size, tsc);
- *from = (const char*)&lttng_param_this_fn;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- *from = (const char*)&lttng_param_call_site;
- align = sizeof(const void *);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- *len += sizeof(const void *);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ltt_commit_slot(ltt_buf, buffer, slot_size);
-
-}
-
-end:
- trace->nesting--;
- return 0;
-}
-#endif //LTT_TRACE
-#endif //LTT_TRACE_FAST
-
-/* Event thread_brand structures */
-static inline void lttng_write_string_user_generic_thread_brand_name(
- char *buffer,
- size_t *to_base,
- size_t *to,
- const char **from,
- size_t *len,
- const char * obj)
-{
- size_t size;
- size_t align;
-
- /* Flush pending memcpy */
- if (*len != 0) {
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, *from, *len);
- }
- *to += *len;
- *len = 0;
-
- align = sizeof(char);
-
- if (*len == 0) {
- *to += ltt_align(*to, align); /* align output */
- } else {
- *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
- }
-
- /* Contains variable sized fields : must explode the structure */
-
- size = strlen(obj) + 1; /* Include final NULL char. */
- if (buffer != NULL)
- memcpy(buffer+*to_base+*to, obj, size);
- *to += size;
-
- /* Realign the *to_base on arch size, set *to to 0 */
- *to += ltt_align(*to, sizeof(void *));
- *to_base = *to_base+*to;
- *to = 0;
-
- /* Put source *from just after the C string */
- *from += size;
-}
-
-
-/* Event thread_brand logging function */
-static inline int trace_user_generic_thread_brand(
- const char * lttng_param_name);
-
-#ifndef LTT_TRACE_FAST
-static inline int trace_user_generic_thread_brand(
- const char * lttng_param_name)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- int ret = 0;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- const char *real_from;
- const char **from = &real_from;
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_name;
- lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
-
- reserve_size = *to_base + *to + *len;
- {
- char stack_buffer[reserve_size];
- buffer = stack_buffer;
-
- *to_base = *to = *len = 0;
-
- *from = (const char*)lttng_param_name;
- lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ret = ltt_trace_generic(ltt_facility_user_generic_B1865E44, event_user_generic_thread_brand, buffer, reserve_size, LTT_BLOCKING, 1);
- }
-
- return ret;
-
-}
-#endif //LTT_TRACE
-#endif //!LTT_TRACE_FAST
-
-#ifdef LTT_TRACE_FAST
-static inline int trace_user_generic_thread_brand(
- const char * lttng_param_name)
-#ifndef LTT_TRACE
-{
- return 0;
-}
-#else
-{
- unsigned int index;
- struct ltt_trace_info *trace = thread_trace_info;
- struct ltt_buf *ltt_buf;
- char *buffer = NULL;
- size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
- size_t *to_base = &real_to_base;
- size_t real_to = 0;
- size_t *to = &real_to;
- size_t real_len = 0;
- size_t *len = &real_len;
- size_t reserve_size;
- size_t slot_size;
- const char *real_from;
- const char **from = &real_from;
- uint64_t tsc;
- if (!trace) {
- ltt_thread_init();
- trace = thread_trace_info;
- }
-
-
- /* For each field, calculate the field size. */
- /* size = *to_base + *to + *len */
- /* Assume that the padding for alignment starts at a
- * sizeof(void *) address. */
-
- *from = (const char*)lttng_param_name;
- lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
-
- reserve_size = *to_base + *to + *len;
- trace->nesting++;
- index = ltt_get_index_from_facility(ltt_facility_user_generic_B1865E44,
- event_user_generic_thread_brand);
-
- {
- ltt_buf = ltt_get_channel_from_index(trace, index);
- slot_size = 0;
- buffer = ltt_reserve_slot(trace, ltt_buf,
- reserve_size, &slot_size, &tsc);
- if (!buffer)
- goto end; /* buffer full */
-
- *to_base = *to = *len = 0;
-
- buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- ltt_facility_user_generic_B1865E44, event_user_generic_thread_brand,
- reserve_size, tsc);
- *from = (const char*)lttng_param_name;
- lttng_write_string_user_generic_thread_brand_name(buffer, to_base, to, from, len, lttng_param_name);
-
- /* Flush pending memcpy */
- if (*len != 0) {
- memcpy(buffer+*to_base+*to, *from, *len);
- *to += *len;
- *len = 0;
- }
-
- ltt_commit_slot(ltt_buf, buffer, slot_size);
-
-}
-
-end:
- trace->nesting--;
- return 0;
-}
-#endif //LTT_TRACE
-#endif //LTT_TRACE_FAST
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif //_LTT_FACILITY_USER_GENERIC_H_
+++ /dev/null
-
-/* LTTng user-space "fast" tracing header
- *
- * Copyright 2006 Mathieu Desnoyers
- *
- */
-
-#ifndef _LTT_USERTRACE_FAST_H
-#define _LTT_USERTRACE_FAST_H
-
-#ifdef LTT_TRACE
-#ifdef LTT_TRACE_FAST
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdint.h>
-#include <syscall.h>
-#include <semaphore.h>
-#include <signal.h>
-
-#include <ltt/ltt-facility-id-user_generic.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef LTT_N_SUBBUFS
-#define LTT_N_SUBBUFS 2
-#endif //LTT_N_SUBBUFS
-
-#ifndef LTT_SUBBUF_SIZE_PROCESS
-#define LTT_SUBBUF_SIZE_PROCESS 1048576
-#endif //LTT_SUBBUF_SIZE_PROCESS
-
-#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)
-
-#ifndef LTT_USERTRACE_ROOT
-#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
-#endif //LTT_USERTRACE_ROOT
-
-
-/* Buffer offset macros */
-
-#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
-#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
-#define SUBBUF_ALIGN(offset, buf) \
- (((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
-#define SUBBUF_TRUNC(offset, buf) \
- ((offset) & (~(buf->subbuf_size-1)))
-#define SUBBUF_INDEX(offset, buf) \
- (BUFFER_OFFSET(offset,buf)/buf->subbuf_size)
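-/* Worked example (editor's addition): with LTT_N_SUBBUFS == 2 and
- * subbuf_size == 1048576 (so alloc_size == 2097152), an offset of 1048580
- * gives:
- *   BUFFER_OFFSET -> 1048580 (offset modulo the whole buffer)
- *   SUBBUF_OFFSET -> 4       (offset within the current sub-buffer)
- *   SUBBUF_INDEX  -> 1       (second sub-buffer)
- *   SUBBUF_TRUNC  -> 1048576 (start of the current sub-buffer)
- *   SUBBUF_ALIGN  -> 2097152 (start of the next sub-buffer) */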
-
-
-#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
-#define LTT_TRACER_VERSION_MAJOR 0
-#define LTT_TRACER_VERSION_MINOR 8
-
-#ifndef atomic_cmpxchg
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#endif //atomic_cmpxchg
-
-struct ltt_trace_header {
- uint32_t magic_number;
- uint32_t arch_type;
- uint32_t arch_variant;
- uint32_t float_word_order; /* Only useful for user space traces */
- uint8_t arch_size;
- //uint32_t system_type;
- uint8_t major_version;
- uint8_t minor_version;
- uint8_t flight_recorder;
- uint8_t has_heartbeat;
- uint8_t has_alignment; /* Event header alignment */
- uint8_t tsc_lsb_truncate;
- uint8_t tscbits;
- uint32_t freq_scale;
- uint64_t start_freq;
- uint64_t start_tsc;
- uint64_t start_monotonic;
- uint64_t start_time_sec;
- uint64_t start_time_usec;
-} __attribute__((packed));
-
-
-struct ltt_block_start_header {
- struct {
- uint64_t cycle_count;
- uint64_t freq; /* khz */
- } begin;
- struct {
- uint64_t cycle_count;
- uint64_t freq; /* khz */
- } end;
- uint32_t lost_size; /* Size unused at the end of the buffer */
- uint32_t buf_size; /* The size of this sub-buffer */
- struct ltt_trace_header trace;
-} __attribute__((packed));
-
-
-
-struct ltt_buf {
- void *start;
- atomic_t offset;
- atomic_t consumed;
- atomic_t reserve_count[LTT_N_SUBBUFS];
- atomic_t commit_count[LTT_N_SUBBUFS];
-
- atomic_t events_lost;
- atomic_t corrupted_subbuffers;
- sem_t writer_sem; /* semaphore on which the writer waits */
- unsigned int alloc_size;
- unsigned int subbuf_size;
-};
-
-struct ltt_trace_info {
- int init;
- int filter;
- pid_t daemon_id;
- int nesting;
- struct {
- struct ltt_buf process;
- char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
- } channel;
-};
-
-
-struct ltt_event_header_nohb {
- uint64_t timestamp;
- unsigned char facility_id;
- unsigned char event_id;
- uint16_t event_size;
-} __attribute__((packed));
-
-extern __thread struct ltt_trace_info *thread_trace_info;
-
-void ltt_thread_init(void);
-
-void __attribute__((no_instrument_function))
- ltt_usertrace_fast_buffer_switch(void);
-
-/* Get the offset of the channel in the ltt_trace_struct */
-#define GET_CHANNEL_INDEX(chan) \
- (unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan
-
-/* ltt_get_index_from_facility
- *
- * Get channel index from facility and event id.
- *
- * @fID : facility ID
- * @eID : event number
- *
- * Get the channel index into which events must be written for the given
- * facility and event number. We get this structure offset as soon as possible
- * and remember it so we pass through this logic only once per trace call (not
- * for every trace).
- */
-static inline unsigned int __attribute__((no_instrument_function))
- ltt_get_index_from_facility(uint8_t fID,
- uint8_t eID)
-{
- return GET_CHANNEL_INDEX(process);
-}
-
-
-static inline struct ltt_buf * __attribute__((no_instrument_function))
- ltt_get_channel_from_index(
- struct ltt_trace_info *trace, unsigned int index)
-{
- return (struct ltt_buf *)((void*)trace+index);
-}
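-
-/* Usage sketch (this is the lookup pair used by the generated probes) :
- *
- * unsigned int index = ltt_get_index_from_facility(fID, eID);
- * struct ltt_buf *ltt_buf = ltt_get_channel_from_index(trace, index);
- *
- * Every facility currently maps to the single "process" channel, so the
- * index is simply the byte offset of channel.process within
- * struct ltt_trace_info. */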
-
-
-/*
- * ltt_get_header_size
- *
- * Calculate alignment offset for arch size void*. This is the
- * alignment offset of the event header.
- *
- * Important note :
- * The event header must be a size multiple of the void* size. This is necessary
- * to be able to calculate statically the alignment offset of the variable
- * length data fields that follows. The total offset calculated here :
- *
- * Alignment of header struct on arch size
- * + sizeof(header struct)
- * + padding added to end of struct to align on arch size.
- * */
-static inline unsigned char __attribute__((no_instrument_function))
- ltt_get_header_size(struct ltt_trace_info *trace,
- void *address,
- size_t data_size,
- size_t *before_hdr_pad)
-{
- unsigned int padding;
- unsigned int header;
- size_t after_hdr_pad;
-
- header = sizeof(struct ltt_event_header_nohb);
-
- /* Padding before the header. Calculated dynamically */
- *before_hdr_pad = ltt_align((unsigned long)address, header);
- padding = *before_hdr_pad;
-
- /* Padding after header, considering the header aligned on ltt_align.
- * Calculated statically if the header size is known. */
- after_hdr_pad = ltt_align(header, sizeof(void*));
- padding += after_hdr_pad;
-
- return header+padding;
-}
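-
-/* Worked example : the packed struct ltt_event_header_nohb is
- * 8 + 1 + 1 + 2 = 12 bytes. On a 64-bit arch, after_hdr_pad =
- * ltt_align(12, 8) = 4. For a write address with address % 8 == 5,
- * before_hdr_pad = ltt_align(5, 12) = 3, so the function returns
- * 12 + 3 + 4 = 19 bytes of header plus padding. */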
-
-
-/* ltt_write_event_header
- *
- * Writes the event header at the given buffer pointer and returns the
- * address right after the header and its trailing alignment padding.
- *
- * @trace : trace information structure
- * @buf : pointer to the channel buffer
- * @ptr : buffer pointer where the header is written
- * @fID : facility ID
- * @eID : event ID
- * @event_size : size of the event, excluding the event header.
- * @tsc : time stamp counter.
- */
-static inline char *__attribute__((no_instrument_function))
- ltt_write_event_header(
- struct ltt_trace_info *trace, struct ltt_buf *buf,
- void *ptr, uint8_t fID, uint32_t eID, size_t event_size,
- uint64_t tsc)
-{
- size_t after_hdr_pad;
- struct ltt_event_header_nohb *nohb;
-
- event_size = min(event_size, 0xFFFFU);
- nohb = (struct ltt_event_header_nohb *)(ptr);
- nohb->timestamp = (uint64_t)tsc;
- nohb->facility_id = fID;
- nohb->event_id = eID;
- nohb->event_size = (uint16_t)event_size;
- after_hdr_pad = ltt_align(sizeof(*nohb), sizeof(void*));
- return ptr + sizeof(*nohb) + after_hdr_pad;
-}
-
-
-
-static inline uint64_t __attribute__((no_instrument_function))
-ltt_get_timestamp(void)
-{
- return get_cycles();
-}
-
-static inline unsigned int __attribute__((no_instrument_function))
-ltt_subbuf_header_len(struct ltt_buf *buf)
-{
- return sizeof(struct ltt_block_start_header);
-}
-
-
-
-static inline void __attribute__((no_instrument_function))
-ltt_write_trace_header(struct ltt_trace_header *header)
-{
- header->magic_number = LTT_TRACER_MAGIC_NUMBER;
- header->major_version = LTT_TRACER_VERSION_MAJOR;
- header->minor_version = LTT_TRACER_VERSION_MINOR;
- header->float_word_order = 0; //FIXME
- header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
- header->arch_size = sizeof(void*);
- header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
- header->flight_recorder = 0;
- header->has_heartbeat = 0;
- header->tsc_lsb_truncate = 0;
- header->tscbits = 0;
-
-#ifndef LTT_PACK
- header->has_alignment = sizeof(void*);
-#else
- header->has_alignment = 0;
-#endif
-
- //FIXME
- header->freq_scale = 0;
- header->start_freq = 0;
- header->start_tsc = 0;
- header->start_monotonic = 0;
- header->start_time_sec = 0;
- header->start_time_usec = 0;
-}
-
-
-static inline void __attribute__((no_instrument_function))
-ltt_buffer_begin_callback(struct ltt_buf *buf,
- uint64_t tsc, unsigned int subbuf_idx)
-{
- struct ltt_block_start_header *header =
- (struct ltt_block_start_header*)
- (buf->start + (subbuf_idx*buf->subbuf_size));
-
- header->begin.cycle_count = tsc;
- header->begin.freq = 0; //ltt_frequency();
-
- header->lost_size = 0xFFFFFFFF; // for debugging...
-
- header->buf_size = buf->subbuf_size;
-
- ltt_write_trace_header(&header->trace);
-
-}
-
-
-
-static inline void __attribute__((no_instrument_function))
-ltt_buffer_end_callback(struct ltt_buf *buf,
- uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
-{
- struct ltt_block_start_header *header =
- (struct ltt_block_start_header*)
- (buf->start + (subbuf_idx*buf->subbuf_size));
- /* offset is assumed to never be 0 here : never deliver a completely
- * empty subbuffer. */
- /* The lost size is between 0 and subbuf_size-1 */
- header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
- buf);
- header->end.cycle_count = tsc;
- header->end.freq = 0; //ltt_frequency();
-}
-
-
-static inline void __attribute__((no_instrument_function))
-ltt_deliver_callback(struct ltt_buf *buf,
- unsigned subbuf_idx,
- void *subbuf)
-{
- ltt_usertrace_fast_buffer_switch();
-}
-
-
-/* ltt_reserve_slot
- *
- * Atomic slot reservation in an LTTng buffer. It takes care of
- * sub-buffer switching.
- *
- * Parameters:
- *
- * @trace : the trace structure to log to.
- * @buf : the buffer to reserve space into.
- * @data_size : size of the variable length data to log.
- * @slot_size : pointer to total size of the slot (out)
- * @tsc : pointer to the tsc at the slot reservation (out)
- *
- * Dynamic padding before and after the event header is computed
- * internally ; it is not passed by the caller.
- *
- * Return : NULL if not enough space, else returns the pointer
- * to the beginning of the reserved slot. */
-static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
- struct ltt_trace_info *trace,
- struct ltt_buf *ltt_buf,
- unsigned int data_size,
- size_t *slot_size,
- uint64_t *tsc)
-{
- int offset_begin, offset_end, offset_old;
- //int has_switch;
- int begin_switch, end_switch_current, end_switch_old;
- int reserve_commit_diff = 0;
- unsigned int size;
- size_t before_hdr_pad;
- int consumed_old, consumed_new;
- int commit_count, reserve_count;
- int ret;
- sigset_t oldset, set;
-
- do {
- offset_old = atomic_read(&ltt_buf->offset);
- offset_begin = offset_old;
- //has_switch = 0;
- begin_switch = 0;
- end_switch_current = 0;
- end_switch_old = 0;
- *tsc = ltt_get_timestamp();
- if(*tsc == 0) {
- /* Error in getting the timestamp, event lost */
- atomic_inc(&ltt_buf->events_lost);
- return NULL;
- }
-
- if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
- begin_switch = 1; /* For offset_begin */
- } else {
- size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
- data_size, &before_hdr_pad)
- + data_size;
-
- if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
- //has_switch = 1;
- end_switch_old = 1; /* For offset_old */
- begin_switch = 1; /* For offset_begin */
- }
- }
-
- if(begin_switch) {
- if(end_switch_old) {
- offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
- }
- offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
- /* Test new buffer integrity */
- reserve_commit_diff =
- atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
- ltt_buf)])
- - atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
- ltt_buf)]);
-
- if(reserve_commit_diff == 0) {
- /* Next buffer not corrupted. */
- //if((SUBBUF_TRUNC(offset_begin, ltt_buf)
- // - SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
- // >= ltt_buf->alloc_size) {
- {
- /* sem_wait is not signal safe. Disable signals around it.
- * Signals are kept disabled to make sure we win the cmpxchg. */
- /* Disable signals */
- ret = sigfillset(&set);
- if(ret) perror("LTT Error in sigfillset\n");
-
- ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
- if(ret) perror("LTT Error in pthread_sigmask\n");
-
- /* We detect whether a signal came between
- * the offset read and signal disabling :
- * if that is the case, we restart
- * the loop after re-enabling signals. It
- * means that a signal handler
- * won the buffer switch. */
- if(offset_old != atomic_read(&ltt_buf->offset)) {
- ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- if(ret) perror("LTT Error in pthread_sigmask\n");
- continue;
- }
- /* If the offset is still the same, then
- * we can safely proceed to do the
- * buffer switch without being
- * interrupted by a signal. */
- sem_wait(&ltt_buf->writer_sem);
-
- }
- /* go on with the write */
-
- //} else {
- // /* next buffer not corrupted, we are either in overwrite mode or
- // * the buffer is not full. It's safe to write in this new subbuffer.*/
- //}
- } else {
- /* Next subbuffer corrupted. Force pushing reader even in normal
- * mode. It's safe to write in this new subbuffer. */
- /* No sem_post is required because we fall through without doing a
- * sem_wait. */
- }
- size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
- data_size, &before_hdr_pad) + data_size;
- if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
- /* Event too big for subbuffers, report error, don't complete
- * the sub-buffer switch. */
- atomic_inc(&ltt_buf->events_lost);
- if(reserve_commit_diff == 0) {
- ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- if(ret) perror("LTT Error in pthread_sigmask\n");
- }
- return NULL;
- } else {
- /* We just made a successful buffer switch and the event fits in the
- * new subbuffer. Let's write. */
- }
- } else {
- /* Event fits in the current buffer and we are not on a switch boundary.
- * It's safe to write */
- }
- offset_end = offset_begin + size;
-
- if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
- /* The offset_end will fall at the very beginning of the next subbuffer.
- */
- end_switch_current = 1; /* For offset_begin */
- }
-
- } while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
- != offset_old);
-
- /* Push the reader if necessary */
- do {
- consumed_old = atomic_read(&ltt_buf->consumed);
- /* If the buffer is in overwrite mode, push the reader consumed count if
- the write position has reached it and we are not at the first
- iteration (don't push the reader farther than the writer).
- This operation can be done concurrently by many writers in the
- same buffer ; the writer at the farthest write position sub-buffer
- index is the one which wins this loop. */
- /* If the buffer is not in overwrite mode, pushing the reader only
- happens if a sub-buffer is corrupted. */
- if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
- - SUBBUF_TRUNC(consumed_old, ltt_buf))
- >= ltt_buf->alloc_size)
- consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
- else {
- consumed_new = consumed_old;
- break;
- }
- } while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
- != consumed_old);
-
- if(consumed_old != consumed_new) {
- /* Reader pushed : we are the winner of the push, so we can
- re-equilibrate reserve and commit. Atomic increment of the commit
- count permits other writers to play around with this variable
- before us. We keep track of corrupted_subbuffers even in overwrite
- mode : we never want to write over a sub-buffer that is not completely
- committed. Possible causes : the buffer size is too low compared to the
- unordered data input, or a writer died between the reserve and the
- commit. */
- if(reserve_commit_diff) {
- /* We have to alter the sub-buffer commit count : a sub-buffer is
- corrupted. We do not deliver it. */
- atomic_add(reserve_commit_diff,
- &ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
- atomic_inc(&ltt_buf->corrupted_subbuffers);
- }
- }
-
-
- if(end_switch_old) {
- /* old subbuffer */
- /* Concurrency safe because we are the last and only thread to alter this
- sub-buffer. As long as it is not delivered and read, no other thread can
- alter the offset, alter the reserve_count or call the
- client_buffer_end_callback on this sub-buffer.
- The only remaining threads could be the ones with pending commits. They
- will have to perform the delivery themselves.
- Not concurrency safe in overwrite mode. We detect corrupted subbuffers
- with commit and reserve counts. We keep a corrupted sub-buffers count
- and push the readers across these sub-buffers.
- Not concurrency safe if a writer is stalled in a subbuffer and
- another writer switches in, finding out it's corrupted. The result will
- be that the old (uncommitted) subbuffer is declared corrupted, and
- that the new subbuffer is declared corrupted too because of the
- commit count adjustment.
- Note : offset_old should never be 0 here. */
- ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
- SUBBUF_INDEX((offset_old-1), ltt_buf));
- /* Setting this reserve_count will allow the sub-buffer to be delivered by
- the last committer. */
- reserve_count =
- atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
- &ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
- if(reserve_count
- == atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
- ltt_buf)])) {
- ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
- NULL);
- }
- }
-
- if(begin_switch) {
- /* Enable signals : this is what guarantees that the reserve which did
- * the sem_wait is in fact the one that wins the cmpxchg for the offset.
- * We only make these system calls on buffer boundaries because of their
- * performance cost. */
- if(reserve_commit_diff == 0) {
- ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- if(ret) perror("LTT Error in pthread_sigmask\n");
- }
- /* New sub-buffer */
- /* This code can be executed unordered : writers may already have written
- to the sub-buffer before this code gets executed, caution. */
- /* The commit makes sure that this code is executed before the deliver
- of this sub-buffer */
- ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
- commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
- &ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
- /* Check if the written buffer has to be delivered */
- if(commit_count
- == atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
- ltt_buf)])) {
- ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
- }
- }
-
- if(end_switch_current) {
- /* current subbuffer */
- /* Concurrency safe because we are the last and only thread to alter this
- sub-buffer. As long as it is not delivered and read, no other thread can
- alter the offset, alter the reserve_count or call the
- client_buffer_end_callback on this sub-buffer.
- The only remaining threads could be the ones with pending commits. They
- will have to perform the delivery themselves.
- Not concurrency safe in overwrite mode. We detect corrupted subbuffers
- with commit and reserve counts. We keep a corrupted sub-buffers count
- and push the readers across these sub-buffers.
- Not concurrency safe if a writer is stalled in a subbuffer and
- another writer switches in, finding out it's corrupted. The result will
- be that the old (uncommitted) subbuffer is declared corrupted, and
- that the new subbuffer is declared corrupted too because of the
- commit count adjustment. */
- ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
- SUBBUF_INDEX((offset_end-1), ltt_buf));
- /* Setting this reserve_count will allow the sub-buffer to be delivered by
- the last committer. */
- reserve_count =
- atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
- &ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
- if(reserve_count
- == atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
- ltt_buf)])) {
- ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
- }
- }
-
- *slot_size = size;
-
- //BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
- //BUG_ON(*slot_size != (offset_end - offset_begin));
-
- return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf) + before_hdr_pad;
-}
-
-
-/* ltt_commit_slot
- *
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- *
- * Parameters:
- *
- * @buf : the buffer to commit to.
- * @reserved : address of the end of the event header.
- * @slot_size : size of the reserved slot.
- *
- */
-static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
- struct ltt_buf *ltt_buf,
- void *reserved,
- unsigned int slot_size)
-{
- unsigned int offset_end = reserved - ltt_buf->start;
- int commit_count;
-
- commit_count = atomic_add_return(slot_size,
- &ltt_buf->commit_count[SUBBUF_INDEX(offset_end-1,
- ltt_buf)]);
-
- /* Check if all commits have been done */
- if(commit_count ==
- atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_end-1, ltt_buf)])) {
- ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_end-1, ltt_buf), NULL);
- }
-}
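-
-/* Typical write path (sketch ; this mirrors the generated probe code in
- * ltt-facility-user_generic.h) :
- *
- * buffer = ltt_reserve_slot(trace, ltt_buf, reserve_size, &slot_size, &tsc);
- * if (!buffer)
- * return; // buffer full, event lost
- * buffer = ltt_write_event_header(trace, ltt_buf, buffer,
- * fID, eID, reserve_size, tsc);
- * ... serialize the event fields at 'buffer' ...
- * ltt_commit_slot(ltt_buf, buffer, slot_size);
- */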
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif //LTT_TRACE_FAST
-#endif //LTT_TRACE
-#endif //_LTT_USERTRACE_FAST_H
+++ /dev/null
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifndef __LTT_USERTRACE_PPC_H
-#define __LTT_USERTRACE_PPC_H
-
-#ifdef __powerpc64__
-#include <ltt/atomic-ppc64.h>
-#include <ltt/system-ppc64.h>
-#else
-#include <ltt/ppc_asm-ppc.h>
-#include <ltt/atomic-ppc.h>
-#include <ltt/system-ppc.h>
-#include <ltt/timex-ppc.h>
-#endif
-
-
-#endif /* __LTT_USERTRACE_PPC_H */
+++ /dev/null
-/*****************************************************************************
- * ltt-usertrace.h
- *
- * LTT userspace tracing header
- *
- * Mathieu Desnoyers, March 2006
- */
-
-#ifndef _LTT_USERTRACE_H
-#define _LTT_USERTRACE_H
-
-#include <errno.h>
-#include <syscall.h>
-#include <string.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#define inline inline __attribute__((always_inline))
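-/* Note : this forces every function declared 'inline' in these headers to
- * be always_inline, i.e. inlined even when compiling without optimization,
- * so the tracing helpers never become out-of-line calls. */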
-
-#if defined(__powerpc__) || defined(__powerpc64__)
-#ifdef __powerpc64__
-#include <ltt/atomic-ppc64.h>
-#include <ltt/system-ppc64.h>
-#include <asm/timex.h>
-#else
-#include <ltt/ppc_asm-ppc.h>
-#include <ltt/atomic-ppc.h>
-#include <ltt/system-ppc.h>
-#include <ltt/timex-ppc.h>
-#endif
-#elif defined(__x86_64__)
-#include <ltt/kernelutils-x86_64.h>
-#elif defined(__i386__)
-#include <ltt/kernelutils-i386.h>
-#elif defined(__arm__)
-#include <ltt/kernelutils-arm.h>
-#elif defined(__SH4__)
-#include <ltt/kernelutils-sh.h>
-#else
-#error "Unsupported architecture"
-#endif
-
-#ifndef min
-#define min(a,b) ((a)<(b)?(a):(b))
-#endif
-
-#ifdef i386
-#define __NR_ltt_trace_generic 328
-#define __NR_ltt_register_generic 329
-#undef NR_syscalls
-#define NR_syscalls 330
-#endif
-
-#ifdef __x86_64__
-#define __NR_ltt_trace_generic 286
-#define __NR_ltt_register_generic 287
-#undef NR_syscalls
-#define NR_syscalls 288
-#endif
-
-#ifdef __powerpc__
-#define __NR_ltt_trace_generic 309
-#define __NR_ltt_register_generic 310
-#undef NR_syscalls
-#define NR_syscalls 311
-#endif
-
-#ifdef __powerpc64__
-#define __NR_ltt_trace_generic 309
-#define __NR_ltt_register_generic 310
-#undef NR_syscalls
-#define NR_syscalls 311
-#endif
-
-#ifdef __arm__
-#define __NR_ltt_trace_generic 352
-#define __NR_ltt_register_generic 353
-#undef NR_syscalls
-#define NR_syscalls 354
-#endif
-
-//FIXME : setup for MIPS
-
-#ifndef _LIBC
-// Put in bits/syscall.h
-#define SYS_ltt_trace_generic __NR_ltt_trace_generic
-#define SYS_ltt_register_generic __NR_ltt_register_generic
-#endif
-
-#define FACNAME_LEN 32
-
-/* LTT userspace tracing is non blocking by default when buffers are full */
-#ifndef LTT_BLOCKING
-#define LTT_BLOCKING 0
-#endif //LTT_BLOCKING
-
-typedef unsigned int ltt_facility_t;
-
-struct user_facility_info {
- char name[FACNAME_LEN];
- uint32_t num_events;
- uint32_t alignment;
- uint32_t checksum;
- uint32_t int_size;
- uint32_t long_size;
- uint32_t pointer_size;
- uint32_t size_t_size;
-};
-#if 0
-static inline __attribute__((no_instrument_function))
-_syscall5(int, ltt_trace_generic, unsigned int, facility_id,
- unsigned int, event_id, void *, data, size_t, data_size, int, blocking)
-static inline __attribute__((no_instrument_function))
-_syscall2(int, ltt_register_generic, unsigned int *, facility_id,
- const struct user_facility_info *, info)
-#endif //0
-
-#define ltt_register_generic(...) syscall(__NR_ltt_register_generic, __VA_ARGS__)
-#define ltt_trace_generic(...) syscall(__NR_ltt_trace_generic, __VA_ARGS__)
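-
-/* Usage sketch (hypothetical facility ; the real callers are the generated
- * ltt-facility-*.h headers) :
- *
- * struct user_facility_info info = {
- * .name = "my_facility", .num_events = 1,
- * .alignment = sizeof(void *), .checksum = 0,
- * .int_size = sizeof(int), .long_size = sizeof(long),
- * .pointer_size = sizeof(void *), .size_t_size = sizeof(size_t),
- * };
- * unsigned int fid;
- * if (ltt_register_generic(&fid, &info) < 0)
- * return; // tracing unavailable
- * ltt_trace_generic(fid, 0, data, data_size, LTT_BLOCKING);
- */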
-
-static inline unsigned int __attribute__((no_instrument_function))
- ltt_align(size_t align_drift, size_t size_of_type);
-
-#ifndef LTT_PACK
-/* Calculate the offset needed to align the type */
-static inline unsigned int
- ltt_align(size_t align_drift, size_t size_of_type)
-{
- size_t alignment = min(sizeof(void*), size_of_type);
-
- return ((alignment - align_drift) & (alignment-1));
-}
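-/* Worked example : ltt_align(5, 4) gives alignment = min(8, 4) = 4 on a
- * 64-bit arch, so the padding is (4 - 5) & 3 = 3 bytes, which brings a
- * drift of 5 up to the next 4-byte boundary. */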
-#define LTT_ALIGN
-#else
-static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
-{
- return 0;
-}
-#define LTT_ALIGN __attribute__((packed))
-#endif //LTT_PACK
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#ifdef LTT_TRACE_FAST
-#include <ltt/ltt-usertrace-fast.h>
-#endif //LTT_TRACE_FAST
-
-#endif //_LTT_USERTRACE_H
+++ /dev/null
-/*
- * include/asm-ppc/ppc_asm.h
- *
- * Definitions used by various bits of low-level assembly code on PowerPC.
- *
- * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC_ASM_PPC_H
-#define _PPC_ASM_PPC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Macros for storing registers into and loading registers from
- * exception frames.
- */
-#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
-#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
-#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-
-#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
- SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
- REST_10GPRS(22, base)
-
-#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
-#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
-#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
-#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
-#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
-#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base)
-#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
-#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
-#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
-#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
-#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-
-#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
-#define SAVE_2VR(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
-#define SAVE_4VR(n,b,base) SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base)
-#define SAVE_8VR(n,b,base) SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base)
-#define SAVE_16VR(n,b,base) SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base)
-#define SAVE_32VR(n,b,base) SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base)
-#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
-#define REST_2VR(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
-#define REST_4VR(n,b,base) REST_2VR(n,b,base); REST_2VR(n+2,b,base)
-#define REST_8VR(n,b,base) REST_4VR(n,b,base); REST_4VR(n+4,b,base)
-#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base)
-#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base)
-
-#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVR(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVR(n,s,base) SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base)
-#define SAVE_8EVR(n,s,base) SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base)
-#define SAVE_16EVR(n,s,base) SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base)
-#define SAVE_32EVR(n,s,base) SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base)
-
-#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVR(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVR(n,s,base) REST_2EVR(n,s,base); REST_2EVR(n+2,s,base)
-#define REST_8EVR(n,s,base) REST_4EVR(n,s,base); REST_4EVR(n+4,s,base)
-#define REST_16EVR(n,s,base) REST_8EVR(n,s,base); REST_8EVR(n+8,s,base)
-#define REST_32EVR(n,s,base) REST_16EVR(n,s,base); REST_16EVR(n+16,s,base)
-
-#ifdef CONFIG_PPC601_SYNC_FIX
-#define SYNC \
-BEGIN_FTR_SECTION \
- sync; \
- isync; \
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-#define SYNC_601 \
-BEGIN_FTR_SECTION \
- sync; \
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-#define ISYNC_601 \
-BEGIN_FTR_SECTION \
- isync; \
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-#else
-#define SYNC
-#define SYNC_601
-#define ISYNC_601
-#endif
-
-#ifndef CONFIG_SMP
-#define TLBSYNC
-#else /* CONFIG_SMP */
-/* tlbsync is not implemented on 601 */
-#define TLBSYNC \
-BEGIN_FTR_SECTION \
- tlbsync; \
- sync; \
-END_FTR_SECTION_IFCLR(CPU_FTR_601)
-#endif
-
-/*
- * This instruction is not implemented on the PPC 603 or 601; however, on
- * the 403GCX and 405GP tlbia IS defined and tlbie is not.
- * All of these instructions exist in the 8xx, they have magical powers,
- * and they must be used.
- */
-
-#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
-#define tlbia \
- li r4,1024; \
- mtctr r4; \
- lis r4,KERNELBASE@h; \
-0: tlbie r4; \
- addi r4,r4,0x1000; \
- bdnz 0b
-#endif
-
-#ifdef CONFIG_BOOKE
-#define tophys(rd,rs) \
- addis rd,rs,0
-
-#define tovirt(rd,rs) \
- addis rd,rs,0
-
-#else /* CONFIG_BOOKE */
-/*
- * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
- * physical base address of RAM at compile time.
- */
-#define tophys(rd,rs) \
-0: addis rd,rs,-KERNELBASE@h; \
- .section ".vtop_fixup","aw"; \
- .align 1; \
- .long 0b; \
- .previous
-
-#define tovirt(rd,rs) \
-0: addis rd,rs,KERNELBASE@h; \
- .section ".ptov_fixup","aw"; \
- .align 1; \
- .long 0b; \
- .previous
-#endif /* CONFIG_BOOKE */
-
-/*
- * On 64-bit cpus, we use the rfid instruction instead of rfi, but
- * we then have to make sure we preserve the top 32 bits except for
- * the 64-bit mode bit, which we clear.
- */
-#ifdef CONFIG_PPC64BRIDGE
-#define FIX_SRR1(ra, rb) \
- mr rb,ra; \
- mfmsr ra; \
- clrldi ra,ra,1; /* turn off 64-bit mode */ \
- rldimi ra,rb,0,32
-#define RFI .long 0x4c000024 /* rfid instruction */
-#define MTMSRD(r) .long (0x7c000164 + ((r) << 21)) /* mtmsrd */
-#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
-
-#else
-#define FIX_SRR1(ra, rb)
-#ifndef CONFIG_40x
-#define RFI rfi
-#else
-#define RFI rfi; b . /* Prevent prefetch past rfi */
-#endif
-#define MTMSRD(r) mtmsr r
-#define CLR_TOP32(r)
-#endif /* CONFIG_PPC64BRIDGE */
-
-#define RFCI .long 0x4c000066 /* rfci instruction */
-#define RFDI .long 0x4c00004e /* rfdi instruction */
-#define RFMCI .long 0x4c00004c /* rfmci instruction */
-
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb) dcbt ra, rb;
-#define PPC405_ERR77_SYNC sync;
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-
-/* The boring bits... */
-
-/* Condition Register Bit Fields */
-
-#define cr0 0
-#define cr1 1
-#define cr2 2
-#define cr3 3
-#define cr4 4
-#define cr5 5
-#define cr6 6
-#define cr7 7
-
-
-/* General Purpose Registers (GPRs) */
-
-#define r0 0
-#define r1 1
-#define r2 2
-#define r3 3
-#define r4 4
-#define r5 5
-#define r6 6
-#define r7 7
-#define r8 8
-#define r9 9
-#define r10 10
-#define r11 11
-#define r12 12
-#define r13 13
-#define r14 14
-#define r15 15
-#define r16 16
-#define r17 17
-#define r18 18
-#define r19 19
-#define r20 20
-#define r21 21
-#define r22 22
-#define r23 23
-#define r24 24
-#define r25 25
-#define r26 26
-#define r27 27
-#define r28 28
-#define r29 29
-#define r30 30
-#define r31 31
-
-
-/* Floating Point Registers (FPRs) */
-
-#define fr0 0
-#define fr1 1
-#define fr2 2
-#define fr3 3
-#define fr4 4
-#define fr5 5
-#define fr6 6
-#define fr7 7
-#define fr8 8
-#define fr9 9
-#define fr10 10
-#define fr11 11
-#define fr12 12
-#define fr13 13
-#define fr14 14
-#define fr15 15
-#define fr16 16
-#define fr17 17
-#define fr18 18
-#define fr19 19
-#define fr20 20
-#define fr21 21
-#define fr22 22
-#define fr23 23
-#define fr24 24
-#define fr25 25
-#define fr26 26
-#define fr27 27
-#define fr28 28
-#define fr29 29
-#define fr30 30
-#define fr31 31
-
-#define vr0 0
-#define vr1 1
-#define vr2 2
-#define vr3 3
-#define vr4 4
-#define vr5 5
-#define vr6 6
-#define vr7 7
-#define vr8 8
-#define vr9 9
-#define vr10 10
-#define vr11 11
-#define vr12 12
-#define vr13 13
-#define vr14 14
-#define vr15 15
-#define vr16 16
-#define vr17 17
-#define vr18 18
-#define vr19 19
-#define vr20 20
-#define vr21 21
-#define vr22 22
-#define vr23 23
-#define vr24 24
-#define vr25 25
-#define vr26 26
-#define vr27 27
-#define vr28 28
-#define vr29 29
-#define vr30 30
-#define vr31 31
-
-#define evr0 0
-#define evr1 1
-#define evr2 2
-#define evr3 3
-#define evr4 4
-#define evr5 5
-#define evr6 6
-#define evr7 7
-#define evr8 8
-#define evr9 9
-#define evr10 10
-#define evr11 11
-#define evr12 12
-#define evr13 13
-#define evr14 14
-#define evr15 15
-#define evr16 16
-#define evr17 17
-#define evr18 18
-#define evr19 19
-#define evr20 20
-#define evr21 21
-#define evr22 22
-#define evr23 23
-#define evr24 24
-#define evr25 25
-#define evr26 26
-#define evr27 27
-#define evr28 28
-#define evr29 29
-#define evr30 30
-#define evr31 31
-
-/* some stab codes */
-#define N_FUN 36
-#define N_RSYM 64
-#define N_SLINE 68
-#define N_SO 100
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif //_PPC_ASM_PPC_H
+++ /dev/null
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifndef __PPC_SYSTEM_H
-#define __PPC_SYSTEM_H
-
-#include <asm/atomic.h>
-#include <asm/hw_irq.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory). The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * We can use the eieio instruction for wmb, but since it doesn't
- * give any ordering guarantees about loads, we have to use the
- * stronger but slower sync instruction for mb and rmb.
- */
-#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif /* CONFIG_SMP */
-
-static inline unsigned long
-xchg_u32(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__ ("\n\
-1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stwcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
- : "cc", "memory");
-
- return prev;
-}
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
-
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
- switch (size) {
- case 4:
- return (unsigned long) xchg_u32(ptr, x);
-#if 0 /* xchg_u64 doesn't exist on 32-bit PPC */
- case 8:
- return (unsigned long) xchg_u64(ptr, x);
-#endif /* 0 */
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-extern inline void * xchg_ptr(void * m, void * val)
-{
- return (void *) xchg_u32(m, (unsigned long) val);
-}
-
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ ("\n\
-1: lwarx %0,0,%2 \n\
- cmpw 0,%0,%3 \n\
- bne 2f \n"
- PPC405_ERR77(0,%2)
-" stwcx. %4,0,%2 \n\
- bne- 1b\n"
-#if 0 //only using one CPU at a time (LTT) // def CONFIG_SMP
-" sync\n"
-#endif /* CONFIG_SMP */
-"2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
-#if 0 /* we don't have __cmpxchg_u64 on 32-bit PPC */
- case 8:
- return __cmpxchg_u64(ptr, old, new);
-#endif /* 0 */
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
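-
-/* Usage sketch : lock-free increment of a shared 32-bit counter.
- *
- * static unsigned int counter;
- * unsigned int old;
- * do {
- * old = counter;
- * } while (cmpxchg(&counter, old, old + 1) != old);
- */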
-
-#define arch_align_stack(x) (x)
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif /* __PPC_SYSTEM_H */
+++ /dev/null
-#ifndef __PPC64_SYSTEM_H
-#define __PPC64_SYSTEM_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-//#include <linux/config.h>
-//#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/memory.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory). The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though.
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight eieio barrier on
- * SMP since it is only used to order updates to system memory.
- */
-#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
-#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
-#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() __asm__ __volatile__("": : :"memory")
-#define smp_rmb() __asm__ __volatile__("": : :"memory")
-#define smp_wmb() __asm__ __volatile__("": : :"memory")
-#define smp_read_barrier_depends() do { } while(0)
-#endif /* CONFIG_SMP */
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- *
- * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
- * is more like most of the other architectures.
- */
-static inline unsigned long
-__xchg_u32(volatile int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%3 # __xchg_u32\n\
- stwcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-static inline unsigned long
-__xchg_u64(volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # __xchg_u64\n\
- stdcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long
-__cmpxchg_u32(volatile int *p, int old, int new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n\
- stwcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-static inline unsigned long
-__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
- case 8:
- return __cmpxchg_u64(ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN 0
-
-#define arch_align_stack(x) (x)
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif
+++ /dev/null
-#ifndef __TIMEX_PPC_H
-#define __TIMEX_PPC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CPU_FTR_601 0x00000100
-
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-
-typedef uint64_t cycles_t;
-
-/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
-static inline unsigned long get_tbl(void)
-{
- unsigned long tbl;
-
-//#if defined(CONFIG_403GCX)
-// asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
-//#else
- asm volatile("mftb %0" : "=r" (tbl));
-//#endif
- return tbl;
-}
-
-static inline unsigned int get_tbu(void)
-{
- unsigned int tbu;
-
-//#if defined(CONFIG_403GCX)
-// asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
-//#else
- asm volatile("mftbu %0" : "=r" (tbu));
-//#endif
- return tbu;
-}
-
-static inline uint64_t get_tb(void)
-{
- unsigned int tbhi, tblo, tbhi2;
-
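- /* Re-read the upper half until it is stable : this catches the case
- * where the lower half wraps (carrying into tbu) between the reads. */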
- do {
- tbhi = get_tbu();
- tblo = get_tbl();
- tbhi2 = get_tbu();
- } while (tbhi != tbhi2);
-
- return ((uint64_t)tbhi << 32) | tblo;
-}
-
-static inline cycles_t get_cycles(void)
-{
- return get_tb();
-}
-
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif
-
-#endif //__TIMEX_PPC_H
+++ /dev/null
-
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-
-
-int main(int argc, char **argv)
-{
- printf("Will trace the following string : \"Hello world! Have a nice day.\"\n");
- printf("every microsecond.\n");
- printf("Abort with CTRL-C.\n");
- printf("No file is created with this example : it logs through a kernel\n");
- printf("system call. See the LTTng lttctl command to start tracing.\n");
-
- while(1) {
- trace_user_generic_string("Hello world! Have a nice day.");
- usleep(1);
- }
-
- return 0;
-}
-
+++ /dev/null
-
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_TRACE_FAST
-#include <ltt/ltt-facility-user_generic.h>
-
-
-int main(int argc, char **argv)
-{
- printf("Will trace the following string : Running fast! in an infinite loop.\n");
- printf("Abort with CTRL-C or it will quickly fill up your disk.\n");
- printf("See the result file in /tmp/ltt-usertrace.\n");
-
- while(1) {
- trace_user_generic_string("Running fast!");
- }
-
- return 0;
-}
-
+++ /dev/null
-
-
-#include <stdio.h>
-#include <unistd.h>
-
-
-
-
-void test_function(void)
-{
- printf("we are in a test function\n");
-}
-
-
-int main(int argc, char **argv)
-{
- printf("Abort with CTRL-C.\n");
- printf("See the result file in /tmp/ltt-usertrace.\n");
-
-
- while(1) {
- test_function();
- sleep(1);
- }
-
- return 0;
-}
-
+++ /dev/null
-
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-#include <ltt/ltt-facility-custom-user_generic.h>
-// Notice the inclusion of ltt-facility-custom-user_generic.h for the
-// slow_printf support
-
-
-int main(int argc, char **argv)
-{
- printf("Will trace a printf of an incrementing counter.\n");
- printf("Abort with CTRL-C.\n");
- printf("No file is created with this example : it logs through a kernel\n");
- printf("system call. See the LTTng lttctl command to start tracing.\n");
-
- unsigned int count = 0;
-
- while(1) {
- trace_user_generic_slow_printf("in: %s at: %s:%d: Counter value is: %u.",
- __FILE__, __func__, __LINE__, count);
- count++;
- sleep(1);
- }
-
- return 0;
-}
-
+++ /dev/null
-
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-
-
-int main(int argc, char **argv)
-{
- printf("Will create a branded thread\n");
- trace_user_generic_thread_brand("Sample_brand");
-
- sleep(2);
-
- return 0;
-}
-
+++ /dev/null
-
-#include <pthread.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-#define LTT_TRACE
-#define LTT_TRACE_FAST
-#include <ltt/ltt-facility-user_generic.h>
-
-
-void *thr1(void *arg)
-{
- int i;
- ltt_thread_init(); /* This init is not required : it will be done
- automatically anyway at the first tracing call site. */
- printf("thread 1, thread id : %lu, pid %lu\n", pthread_self(), getpid());
-
- for(i=0; i<100000; i++) {
- trace_user_generic_string("Hello world! Have a nice day.");
- }
- pthread_exit((void*)1);
-}
-
-
-/* Example of a _bad_ thread, which still works with the tracing */
-void *thr2(void *arg)
-{
- int i;
- /* See ? no init */
- printf("thread 2, thread id : %lu, pid %lu\n", pthread_self(), getpid());
-
- for(i=0; i<100000; i++) {
- trace_user_generic_string("Hello world! Have a nice day.");
- }
- /* This thread is a bad citizen : returning like this causes its
- * cancellation cleanup routines not to be executed. The tracer still
- * detects this, but only when the complete process dies. This is not
- * recommended if you create a huge number of threads. */
- return ((void*)2);
-}
-
-
-int main()
-{
- int err;
- pthread_t tid1, tid2;
- void *tret;
-
- printf("Will trace the following string : Hello world! Have a nice day.\n");
- printf("It will stop automatically.\n");
- printf("See the result file in /tmp/ltt-usertrace.\n");
-
- printf("thread main, thread id : %lu, pid %lu\n", pthread_self(), getpid());
- err = pthread_create(&tid1, NULL, thr1, NULL);
- if(err!=0) exit(1);
-
- err = pthread_create(&tid2, NULL, thr2, NULL);
- if(err!=0) exit(1);
-
- err = pthread_join(tid1, &tret);
- if(err!= 0) exit(1);
-
- err = pthread_join(tid2, &tret);
- if(err!= 0) exit(1);
-
- return 0;
-}
+++ /dev/null
-
-#include <pthread.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-#define LTT_TRACE
-//this one is a non blocking sample (not #define LTT_BLOCKING 1)
-#include <ltt/ltt-facility-user_generic.h>
-
-
-void *thr1(void *arg)
-{
- printf("thread 1, thread id : %lu, pid %lu\n", pthread_self(), getpid());
-
- while(1) {
- trace_user_generic_string("Hello world! Have a nice day.");
- sleep(2);
- }
- pthread_exit((void*)1);
-}
-
-
-/* Example of a _bad_ thread, which still works with the tracing */
-void *thr2(void *arg)
-{
- printf("thread 2, thread id : %lu, pid %lu\n", pthread_self(), getpid());
- sleep(1);
- while(1) {
- trace_user_generic_string("Hello world! Have a nice day.");
- sleep(2);
- }
- return ((void*)2);
-}
-
-
-int main()
-{
- int err;
- pthread_t tid1, tid2;
- void *tret;
-
- printf("Will trace the following string : Hello world! Have a nice day.\n");
- printf("Press CTRL-C to stop.\n");
- printf("No file is created with this example : it logs through a kernel\n");
- printf("system call. See the LTTng lttctl command to start tracing.\n\n");
-
- printf("thread main, thread id : %lu, pid %lu\n", pthread_self(), getpid());
- err = pthread_create(&tid1, NULL, thr1, NULL);
- if(err!=0) exit(1);
-
- err = pthread_create(&tid2, NULL, thr2, NULL);
- if(err!=0) exit(1);
-
- err = pthread_join(tid1, &tret);
- if(err!= 0) exit(1);
-
- err = pthread_join(tid2, &tret);
- if(err!= 0) exit(1);
-
- return 0;
-}
+++ /dev/null
-
-#include <stdio.h>
-#include <unistd.h>
-
-#define LTT_TRACE
-#define LTT_BLOCKING 1
-#include <ltt/ltt-facility-user_generic.h>
-
-
-int main(int argc, char **argv)
-{
- printf("Will trace the following string : \"Hello world! Have a nice day.\"\n");
- printf("every second.\n");
- printf("Abort with CTRL-C.\n");
- printf("No file is created with this example : it logs through a kernel\n");
- printf("system call. See the LTTng lttctl command to start tracing.\n");
-
- while(1) {
- trace_user_generic_string("Hello world! Have a nice day.");
- sleep(1);
- }
-
- return 0;
-}
-
+++ /dev/null
-#
-# Spec file for LTT Usertrace
-#
-Summary: Linux Trace Toolkit Userspace Tracing Package
-Name: ltt-usertrace
-Version: 0.13
-License: GPL
-Release: 1
-Group: Applications/Development
-Source: http://ltt.polymtl.ca/packages/%{name}-%{version}.tar.gz
-URL: http://ltt.polymtl.ca
-Packager: Martin Bisson <bissonm@discreet.com>
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-
-# Where do we install the libs
-%ifarch x86_64 ppc64 ppc64iseries ia64
-%define libdir /usr/lib64
-%else
-%define libdir /usr/lib
-%endif
-
-
-%description
-This package makes it possible to do userspace tracing with the Linux
-Trace Toolkit.
-
-%prep
-%setup -q
-
-%build
-make libs
-
-%install
-rm -rf $RPM_BUILD_ROOT
-mkdir -p $RPM_BUILD_ROOT $RPM_BUILD_ROOT/usr/include $RPM_BUILD_ROOT/%{libdir}
-make INCLUDE_DIR=$RPM_BUILD_ROOT/usr/include LIB_DIR=$RPM_BUILD_ROOT/%{libdir} install
-
-%post
-echo "Running ldconfig (might take a while)"
-ldconfig
-
-%postun
-echo "Running ldconfig (might take a while)"
-ldconfig
-
-%files
-/usr/include/ltt
-/usr/include/ltt/atomic-ppc.h
-/usr/include/ltt/atomic-ppc64.h
-/usr/include/ltt/kernelutils-x86_64.h
-/usr/include/ltt/kernelutils-i386.h
-/usr/include/ltt/ltt-facility-custom-user_generic.h
-/usr/include/ltt/ltt-facility-id-user_generic.h
-/usr/include/ltt/ltt-facility-user_generic.h
-/usr/include/ltt/ltt-usertrace-fast.h
-/usr/include/ltt/ltt-usertrace-ppc.h
-/usr/include/ltt/ltt-usertrace.h
-/usr/include/ltt/ppc_asm-ppc.h
-/usr/include/ltt/system-ppc.h
-/usr/include/ltt/system-ppc64.h
-/usr/include/ltt/timex-ppc.h
-%{libdir}/libltt-instrument-functions.a
-%{libdir}/libltt-instrument-functions.so
-%{libdir}/libltt-instrument-functions.so.0
-%{libdir}/libltt-loader-user_generic.a
-%{libdir}/libltt-loader-user_generic.so
-%{libdir}/libltt-loader-user_generic.so.0
-%{libdir}/libltt-usertrace-fast.a
-%{libdir}/libltt-usertrace-fast.so
-%{libdir}/libltt-usertrace-fast.so.0
#obj-m += test-sys_call.o
# obj-m += test-bug.o
obj-m += test-tsc-sync2.o
- obj-m += test-nop-speed.o
+ #obj-m += test-63.o
+ #obj-m += test-nop-speed.o
#obj-m += test-hpet.o
# obj-m += test-prefix-speed-32.o
#obj-m += test-prefix-speed.o