tests/hello.cxx/Makefile
tests/same_line_tracepoint/Makefile
tests/snprintf/Makefile
+ tests/benchmark/Makefile
tests/utils/Makefile
lttng-ust.pc
])
-SUBDIRS = utils hello same_line_tracepoint snprintf
+SUBDIRS = utils hello same_line_tracepoint snprintf benchmark
if CXX_WORKS
SUBDIRS += hello.cxx
+++ /dev/null
-CC=gcc
-CFLAGS=-O3 -Wall
-LFLAGS=-lpthread -lust
-
-all: bench1 bench2
-
-bench1: bench.c
- $(CC) $(CFLAGS) $(LFLAGS) -o bench1 bench.c
-bench2: bench.c
- $(CC) $(CFLAGS) $(LFLAGS) -DMARKER -o bench2 bench.c
-
-
-clean:
- rm -f *.o bench1 bench2
--- /dev/null
+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include -Wsystem-headers
+
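+# bench1 and bench2 are built from the same sources; only bench2 is
+# compiled with -DTRACING, so only it contains the tracepoint calls.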
+noinst_PROGRAMS = bench1 bench2
+bench1_SOURCES = bench.c tp.c ust_tests_benchmark.h
+bench1_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la
+bench2_SOURCES = bench.c tp.c ust_tests_benchmark.h
+bench2_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la
+bench2_CFLAGS = -DTRACING
+
+dist_noinst_SCRIPTS = test_benchmark ptime
+
+EXTRA_DIST = README
+
+if LTTNG_UST_BUILD_WITH_LIBDL
+bench1_LDADD += -ldl
+bench2_LDADD += -ldl
+endif
+if LTTNG_UST_BUILD_WITH_LIBC_DL
+bench1_LDADD += -lc
+bench2_LDADD += -lc
+endif
-UST Benchmark
-INSTALLATION INSTRUCTIONS:
- - run ./install to install and compile UST/benchmark
- - run ./run to perform the benchmark
-
-The trace files will be automatically saved at $HOME/.usttraces
+To run the benchmark:
+
+	./test_benchmark
+
+You can specify the number of iterations, events per thread, and threads
+by setting the ITERS, NR_EVENTS and NR_CPUS environment variables,
+respectively:
+
+	ITERS=10 NR_EVENTS=10000 NR_CPUS=4 ./test_benchmark
+++ /dev/null
-#!/usr/bin/python
-
-import sys
-
-def main():
- total = 0.0
- n = 0.0
- while 1:
- line = sys.stdin.readline()
- if not line:
- break
-
- val = float(line)
- n = n+1.0
- total = total+val
-
- print total/n
-main()
#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
-#include <ust/marker.h>
#include <time.h>
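+
+/*
+ * TRACEPOINT_DEFINE must appear in exactly one compile unit of the
+ * program before the provider header is included; the probe bodies
+ * themselves are generated in tp.c via TRACEPOINT_CREATE_PROBES.
+ */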
+#ifdef TRACING
+#define TRACEPOINT_DEFINE
+#include "ust_tests_benchmark.h"
+#endif
+
static int nr_cpus;
static unsigned long nr_events;
fclose(file);
time(NULL);
-#ifdef MARKER
- ust_marker(event, "event %d", v);
+#ifdef TRACING
+ tracepoint(ust_tests_benchmark, tpbench, v);
#endif
}
+++ /dev/null
-#!/bin/bash
-#
-# install ust
-
-# compile and install UST
-cd ../../
-make clean
-CFLAGS=-O3 ./configure
-make
-sudo make install
-sudo ldconfig
-
-# compile benchmark
-cd tests/benchmark
-make clean && make
-#!/usr/bin/python
+#!/usr/bin/env python
import sys
import time
def main():
args = sys.argv[1:]
if len(args) < 1:
- print "usage: %s COMMAND" % sys.argv[0]
+ print("usage: %s COMMAND" % sys.argv[0])
sys.exit(1)
- cmd = args[0]
+ cmd = ' '.join(args)
t1 = time.time()
os.system(cmd)
- 1
t2 = time.time()
- print (t2-t1)
+ print(t2-t1)
-main()
+if __name__ == "__main__":
+ main()
+++ /dev/null
-#!/bin/bash
-#
-# run ust benchmark
-#
-
-iters=20
-NR_EVENTS=7000000
-NR_CPUS=1
-
-TIME="./ptime"
-
-#PROG_NOMARKER="find /usr"
-PROG_NOMARKER="./bench1 $NR_CPUS $NR_EVENTS"
-#PROG_MARKER=$PROG_NOMARKER
-PROG_MARKER="./bench2 $NR_CPUS $NR_EVENTS"
-
-CMD_NOMARKER="$TIME '$PROG_NOMARKER >/dev/null 2>&1'"
-CMD_MARKER="$TIME '$PROG_MARKER >/dev/null 2>&1'"
-CMD_MARKERCONN="UST_AUTOPROBE=1 $TIME '$PROG_MARKER >/dev/null 2>&1'"
-CMD_MARKERTRACE="UST_TRACE=1 UST_AUTOPROBE=1 UST_OVERWRITE=1 UST_AUTOCOLLECT=0 $TIME '$PROG_MARKER >/dev/null 2>&1'"
-
-echo "ust benchmark"
-
-rm -f result-*.txt average-*.txt perevent-*.txt
-
-echo "using $NR_CPUS processor(s)"
-echo "using $NR_EVENTS events per cpu"
-
-n=0
-while [ $n -lt "$iters" ]; do
- # without markers
- echo ">running without markers"
- echo 3 >/proc/sys/vm/drop_caches
- t1=$(sh -c "$CMD_NOMARKER")
- echo "$t1" >>result-nomark.txt
- echo " time=$t1 sec"
- n=$(($n+1))
-done
-./average <result-nomark.txt >average-nomark.txt
-
-## with markers, not connected
-#n=0
-#while [ $n -lt "$iters" ]; do
-# echo ">running with markers, not connected"
-# echo 3 >/proc/sys/vm/drop_caches
-# t2=$(sh -c "$CMD_MARKER")
-# echo "$t2" >>result-mark.txt
-# echo " time=$t2 sec"
-# n=$(($n+1))
-#done
-#./average <result-mark.txt >average-mark.txt
-#echo "( $(<average-mark.txt) - $(<average-nomark.txt) ) / $NR_EVENTS" | bc -l >perevent-mark.txt
-
-## with markers, connected
-#n=0
-#while [ $n -lt "$iters" ]; do
-# echo ">running with markers activated"
-# echo 3 >/proc/sys/vm/drop_caches
-# t2=$(sh -c "$CMD_MARKERCONN")
-# echo "$t2" >>result-markconn.txt
-# echo " time=$t2 sec"
-# n=$(($n+1))
-#done
-#./average <result-markconn.txt >average-markconn.txt
-#echo "( $(<average-markconn.txt) - $(<average-nomark.txt) ) / $NR_EVENTS" | bc -l >perevent-markconn.txt
-
-# with markers, connected, tracing
-n=0
-while [ $n -lt "$iters" ]; do
- echo ">running with markers activated, trace recording"
- echo 3 >/proc/sys/vm/drop_caches
- t3=$(sh -c "$CMD_MARKERTRACE")
- echo "$t3" >>result-marktrace.txt
- echo " time=$t3 sec"
- n=$(($n+1))
-done
-./average <result-marktrace.txt >average-marktrace.txt
-echo "( $(<average-marktrace.txt) - $(<average-nomark.txt) ) / $NR_EVENTS" | bc -l >perevent-marktrace.txt
-
-function print_penalty()
-{
- echo ""
-
- #penalty = t2 - t1
- penalty=$(echo "$2 - $1;" | bc)
- echo "Penalty ($3) = $penalty sec"
-
- #event = penalty / (nr_events * nr_cpus)
- event=$(echo "scale=10; ($penalty / ($NR_EVENTS * $NR_CPUS));" | bc)
- echo "Penalty per event ($3) = $event sec"
-}
-
-#print_penalty $t1 $t2 "Penalty for markers enabled, not tracing"
-#print_penalty $t1 $t3 "Penalty for markers enabled, tracing"
-
-
-rm -f /tmp/bench.txt
--- /dev/null
+#!/bin/bash
+
+CURDIR=$(dirname $0)/
+TESTDIR=$CURDIR/..
+source $TESTDIR/utils/tap.sh
+
+plan_tests 1
+
+: ${ITERS:=20}
+: ${NR_EVENTS:=7000000}
+: ${NR_CPUS:=1}
+
+: ${TIME:="$CURDIR/ptime"}
+
+: ${PROG_NOTRACING:="$CURDIR/bench1 $NR_CPUS $NR_EVENTS"}
+: ${PROG_TRACING:="$CURDIR/bench2 $NR_CPUS $NR_EVENTS"}
+
+CMD_NOTRACING="$TIME '$PROG_NOTRACING >/dev/null 2>&1'"
+CMD_TRACING="$TIME '$PROG_TRACING >/dev/null 2>&1'"
+
+time_notrace=0
+for i in $(seq $ITERS); do
+ echo 3 >/proc/sys/vm/drop_caches
+ time_notrace="$time_notrace+$(sh -c "$CMD_NOTRACING")"
+done
+
+lttng-sessiond -d --no-kernel
+lttng -q create
+lttng -q enable-event -u -a
+lttng -q start
+
+time_trace=0
+for i in $(seq $ITERS); do
+ echo 3 >/proc/sys/vm/drop_caches
+ time_trace="$time_trace+$(sh -c "$CMD_TRACING")"
+done
+
+lttng -q stop
+lttng -q destroy
+
+pass "Trace benchmark"
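+# Per-event overhead: difference of the two accumulated wall-clock sums,
+# divided by ITERS runs of NR_EVENTS events each.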
+diag "Average tracing overhead per event is $(echo "scale=6;( ($time_trace) - ($time_notrace) ) / $ITERS / $NR_EVENTS" | bc -l)s"
--- /dev/null
+#define TRACEPOINT_CREATE_PROBES
+#include "ust_tests_benchmark.h"
--- /dev/null
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER ust_tests_benchmark
+
+#if !defined(_TRACEPOINT_UST_TESTS_BENCHMARK_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_TESTS_BENCHMARK_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(ust_tests_benchmark, tpbench,
+ TP_ARGS(int, value),
+ TP_FIELDS(
+ ctf_integer(int, event, value)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_TESTS_BENCHMARK_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_tests_benchmark.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
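
For reference, a minimal standalone user of this provider would look roughly
like the sketch below. This is an illustration only, not part of the patch;
it assumes the program is linked against tp.c and liblttng-ust as in the
Makefile.am above.

	#define TRACEPOINT_DEFINE
	#include "ust_tests_benchmark.h"

	int main(void)
	{
		int i;

		/* Emit a few events; they are recorded once a session enables them. */
		for (i = 0; i < 10; i++)
			tracepoint(ust_tests_benchmark, tpbench, i);
		return 0;
	}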