--- /dev/null
# Jenkins Job Builder definition for the babeltrace benchmark job.
# The job triggers LAVA benchmark runs and archives the generated report.
- job:
    name: babeltrace-benchmark
    project-type: freestyle
    defaults: global
    description: |
      This job is responsible for triggering jobs in lava and generate the report

      The Babeltrace project provides trace read and write libraries, as well
      as a trace converter. Plugins can be created for any trace format to
      allow its conversion to/from another trace format.

      The main format expected to be converted to/from is the Common Trace
      Format (CTF). The default input format of the "babeltrace" command is
      CTF, and its default output format is a human-readable text log. The
      "babeltrace-log" command converts from a text log to a CTF trace.

      <p>Job is managed by Jenkins Job Builder.</p>

    wrappers:
      - ansicolor
      - timestamps
      - workspace-cleanup
      - credentials-binding:
          - text:
              credential-id: jenkins_lava2_key
              variable: LAVA2_JENKINS_TOKEN

    triggers:
      # Quoted: a plain scalar must not start with the '@' indicator.
      - timed: '@daily'

    builders:
      - shell:
          !include-raw: scripts/babeltrace-benchmark/benchmark.sh

    publishers:
      - archive:
          artifacts: 'results/**'
          allow-empty: false

    scm:
      - babeltrace
      - lttng-ci

- scm:
    name: babeltrace
    scm:
      - git:
          # HTTPS instead of git://: GitHub no longer serves the
          # unauthenticated git protocol, so git:// clones fail.
          url: https://github.com/efficios/babeltrace.git
          browser: githubweb
          browser-url: https://github.com/efficios/babeltrace
          basedir: src/babeltrace
          skip-tag: true
          git-tool: jgit

- scm:
    name: lttng-ci
    scm:
      - git:
          url: https://github.com/lttng/lttng-ci.git
          browser: githubweb
          browser-url: https://github.com/lttng/lttng-ci
          basedir: src/lttng-ci
          skip-tag: true
          git-tool: jgit
+
--- /dev/null
# LAVA test definition: build the requested babeltrace commit, run the
# benchmark trace through it, and upload the timing results.
metadata:
  format: Lava-Test Test Definition 1.0
  name: babeltrace 2.0 benchmark
  description: "Run benchmark for babeltrace"
params:
  # Defaults; the submitting job overrides COMMIT with the hash under test.
  TRACE_LOCATION: "https://obj.internal.efficios.com/lava/traces/benchmark/babeltrace/babeltrace_benchmark_trace.tar.gz"
  COMMIT: "invalid"
  GIT_URL: "https://github.com/efficios/babeltrace.git"
run:
  steps:
    - apt install -y time
    - git clone ${GIT_URL} babeltrace
    - pushd babeltrace
    - git checkout ${COMMIT}
    - ./bootstrap
    - ./configure --disable-man-pages
    - make -j
    - make install
    - ldconfig
    # Babeltrace 2.x installs "babeltrace2"; alias it to "babeltrace" so the
    # same benchmark command works for both 1.x and 2.x commits.
    - if [ -a /usr/local/bin/babeltrace ] ; then echo "Running bt1"; else ln -s /usr/local/bin/babeltrace2 /usr/local/bin/babeltrace; fi
    - popd
    - apt install -y curl python3
    - git clone https://github.com/lttng/lttng-ci ci
    - export TMPDIR="/tmp"
    # Collect core dumps locally so crashes can be inspected.
    - mkdir -p coredump
    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
    - ulimit -c unlimited
    # Trace is read from a tmpfs to keep disk I/O out of the measurement.
    # NOTE(review): "size=10024m" looks like a typo for 10240m (10 GiB) —
    # confirm the intended size before changing it.
    - mkdir /tmp/ram_disk
    - mount -t tmpfs -o size=10024m new_ram_disk /tmp/ram_disk
    - curl -o /tmp/trace.tar.gz "${TRACE_LOCATION}"
    - mkdir /tmp/ram_disk/trace
    - tar xvf /tmp/trace.tar.gz --directory /tmp/ram_disk/trace/
    # Two scenarios: dummy sink (decode only) and text sink (decode+format).
    - python3 ./ci/scripts/babeltrace-benchmark/time.py --output=result_dummy_sink --command "babeltrace /tmp/ram_disk/trace/ -o dummy" --iteration 5
    - python3 ./ci/scripts/babeltrace-benchmark/time.py --output=result_text_sink --command "babeltrace /tmp/ram_disk/trace/" --iteration 5
    - ./ci/lava/upload_artifact.sh result_dummy_sink results/benchmarks/babeltrace/dummy/${COMMIT}
    - ./ci/lava/upload_artifact.sh result_text_sink results/benchmarks/babeltrace/text/${COMMIT}
+
--- /dev/null
+#!/usr/bin/python3
+# Copyright (C) 2019 - Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import os
+import tempfile
+from statistics import mean
+import argparse
+import sys
+from operator import add
+
+import matplotlib.pyplot as plt
+from matplotlib.backends.backend_pdf import PdfPages
+from matplotlib.ticker import PercentFormatter
+
+import git
+import numpy
+import lava_submit
+
+from minio import Minio
+from minio.error import NoSuchKey
+from minio.error import ResponseError
+
+
# Benchmark scenarios recorded per commit (dummy sink and text sink runs).
BENCHMARK_TYPES = ["dummy", "text"]
# Object-store bucket that holds the uploaded benchmark results.
DEFAULT_BUCKET = "lava"
+
+
def graph_get_color(branch):
    """
    Return the plot color associated with a babeltrace branch.

    Raises KeyError for a branch outside the known set.
    """
    # Fixed palette so a branch keeps the same color across all graphs.
    palette = {"stable-1.5": "red", "stable-2.0": "green", "master": "blue"}
    return palette[branch]
+
+
def graph_get_title(branch, benchmark_type):
    """
    Build the graph title for a branch/benchmark-type pair.
    """
    type_label = {"dummy": "Dummy output", "text": "Text output"}[benchmark_type]
    return "{} - {}".format(branch, type_label)
+
+
def get_client():
    """
    Return a configured Minio client for the internal object store.

    NOTE(review): the access/secret key pair is hardcoded in source;
    consider moving it to the environment or a credentials store.
    """
    return Minio(
        "obj.internal.efficios.com", access_key="jenkins", secret_key="echo123456"
    )
+
+
def get_file(client, prefix, file_name, workdir_name):
    """
    Download `prefix/file_name` from the default bucket into `workdir_name`.

    Return the local path of the downloaded file, or None when the remote
    object does not exist.
    """
    local_path = os.path.join(workdir_name, file_name)
    remote_name = "{}/{}".format(prefix, file_name)
    try:
        client.fget_object(DEFAULT_BUCKET, remote_name, local_path)
    except NoSuchKey:
        return None
    return local_path
+
+
def delete_file(client, prefix, file_name):
    """
    Delete `prefix/file_name` from the default bucket.

    A missing object is ignored; other server errors are printed.
    """
    remote_name = "{}/{}".format(prefix, file_name)
    try:
        client.remove_object(DEFAULT_BUCKET, remote_name)
    except ResponseError as err:
        print(err)
    except NoSuchKey:
        pass
+
+
def get_git_log(bt_version, cutoff, repo_path):
    """
    Return commit hashes (oldest first) on origin/<bt_version> after `cutoff`.

    WARNING: This fetches the remote and thus mutates the git repo state.
    """
    repo = git.Repo(repo_path)
    repo.git.fetch()
    rev_range = "{}..origin/{}".format(cutoff, bt_version)
    hashes = repo.git.log(rev_range, "--pretty=format:%H", "--reverse")
    return hashes.split("\n")
+
+
def parse_result(result_path):
    """
    Load a benchmark result file and return the per-iteration dataset of
    user time + system time (element-wise sum of the two series).
    """
    with open(result_path) as result:
        data = json.load(result)
    user_times = data["User time (seconds)"]
    system_times = data["System time (seconds)"]
    return [user + system for user, system in zip(user_times, system_times)]
+
+
def get_benchmark_results(client, commit, workdir):
    """
    Fetch the benchmark results for `commit` across all benchmark types.

    Return (results, valid): `results` maps benchmark type -> dataset, or is
    None when any type's file is missing (benchmark corrupted/incomplete);
    `valid` is False when a dataset contains only zeros.
    """
    results = {}
    benchmark_valid = True
    for b_type in BENCHMARK_TYPES:
        prefix = "/results/benchmarks/babeltrace/{}/".format(b_type)
        result_file = get_file(client, prefix, commit, workdir)
        if not result_file:
            # Benchmark is either corrupted or not complete.
            return None, benchmark_valid
        results[b_type] = parse_result(result_file)
        if all(i == 0.0 for i in results[b_type]):
            benchmark_valid = False
            print("Invalid benchmark for {}/{}/{}".format(prefix, b_type, commit))
    # Return only after every benchmark type was gathered: an early return
    # inside the loop would silently drop all types after the first.
    return results, benchmark_valid
+
+
def plot_raw_value(branch, benchmark_type, x_data, y_data, labels, latest_values):
    """
    Plot the graph using the raw value.

    x_data: one x position per commit; y_data: one dataset (list of timings)
    per commit; labels: commit labels for the x axis; latest_values: mapping
    branch -> latest mean result, drawn as horizontal reference lines.
    Draws into the current pyplot figure; the caller saves it.
    """
    point_x_data = []
    outlier_x_data = []
    point_y_data = []
    outlier_y_data = []
    # Flatten each commit's dataset into scatter points, separating IQR
    # outliers so they get a distinct marker.
    for pos in range(len(x_data)):
        x = x_data[pos]
        valid_points, outliers = sanitize_dataset(y_data[pos])
        for y in valid_points:
            point_x_data.append(x)
            point_y_data.append(y)
        for y in outliers:
            outlier_x_data.append(x)
            outlier_y_data.append(y)

    plt.plot(
        point_x_data, point_y_data, "o", label=branch, color=graph_get_color(branch)
    )
    plt.plot(outlier_x_data, outlier_y_data, "+", label="outlier", color="black")

    ymin = 0
    ymax = 1
    if y_data:
        # Pad the y range by 20% on each side of the observed extremes.
        ymin = 0.8 * min([item for sublist in y_data for item in sublist])
        ymax = 1.2 * max([item for sublist in y_data for item in sublist])
    # Put latest of other branches for reference as horizontal line.
    for l_branch, l_result in latest_values.items():
        if not l_result or l_branch == branch:
            continue
        plt.axhline(
            y=l_result,
            label="Latest {}".format(l_branch),
            color=graph_get_color(l_branch),
        )
        # Widen the y range so reference lines stay visible.
        if l_result <= ymin:
            ymin = 0.8 * l_result
        if l_result >= ymax:
            ymax = 1.2 * l_result

    plt.ylim(ymin=ymin, ymax=ymax)
    plt.xticks(x_data, labels, rotation=90, family="monospace")
    plt.title(graph_get_title(branch, benchmark_type), fontweight="bold")
    plt.ylabel("User + system time (s)")
    plt.xlabel("Latest commits")
    plt.legend()

    plt.tight_layout()
    return
+
+
def plot_ratio(branch, benchmark_type, x_data, y_data, labels, latest_values):
    """
    Plot the graph using a ratio using first point as reference (0%).

    y_data here is one mean value per commit (not a dataset per commit).
    Draws into the current pyplot figure; the caller saves it.
    """
    # Fallback reference avoids a division by zero when y_data is empty.
    reference = 0.01
    y_abs_max = 100

    if y_data:
        reference = y_data[0]

    # Transform y_data to a list of ratio for which the reference is the first
    # element.
    local_y_data = list(map(lambda y: ((y / reference) - 1.0) * 100, y_data))

    plt.plot(x_data, local_y_data, "o", label=branch, color=graph_get_color(branch))

    # Put latest of other branches for reference as horizontal line.
    for l_branch, l_result in latest_values.items():
        if not l_result or l_branch == branch:
            continue
        ratio_l_result = ((l_result / reference) - 1.0) * 100.0
        print(
            "branch {} branch {} value {} l_result {} reference {}".format(
                branch, l_branch, ratio_l_result, l_result, reference
            )
        )
        plt.axhline(
            y=ratio_l_result,
            label="Latest {}".format(l_branch),
            color=graph_get_color(l_branch),
        )

    # Draw the reference line.
    plt.axhline(y=0, label="Reference (leftmost point)", linestyle="-", color="Black")

    # Get max absolute value to align the y axis with zero in the middle.
    if local_y_data:
        local_abs_max = abs(max(local_y_data, key=abs)) * 1.3
        # NOTE(review): `y_abs_max` is initialized to 100 and never modified
        # before this check, so `y_abs_max > 100` is always false and
        # `local_abs_max` is never used — the y range is always ±100. This
        # was probably meant to test `local_abs_max`; confirm intent before
        # changing.
        if y_abs_max > 100:
            y_abs_max = local_abs_max

    plt.ylim(ymin=y_abs_max * -1, ymax=y_abs_max)

    ax = plt.gca()
    percent_formatter = PercentFormatter()
    ax.yaxis.set_major_formatter(percent_formatter)
    ax.yaxis.set_minor_formatter(percent_formatter)
    plt.xticks(x_data, labels, rotation=90, family="monospace")
    plt.title(graph_get_title(branch, benchmark_type), fontweight="bold")
    plt.ylabel("Ratio")
    plt.xlabel("Latest commits")
    plt.legend()

    plt.tight_layout()
    return
+
+
def generate_graph(branches, report_name, git_path):
    """
    Build the PDF report at `report_name`: for each benchmark type and each
    branch, one raw-value page and one ratio page of per-commit results.

    branches: mapping branch name -> cutoff commit; git_path: local repo
    used to enumerate commits.
    """

    # The PDF document
    pdf_pages = PdfPages(report_name)

    client = get_client()
    branch_results = dict()

    # Fetch the results for each branch.
    for branch, cutoff in branches.items():
        commits = get_git_log(branch, cutoff, git_path)
        results = []
        with tempfile.TemporaryDirectory() as workdir:
            for commit in commits:
                # Skip commits with missing or all-zero benchmarks.
                b_results, valid = get_benchmark_results(client, commit, workdir)
                if not b_results or not valid:
                    continue
                results.append((commit, b_results))
        branch_results[branch] = results

    for b_type in BENCHMARK_TYPES:
        latest_values = {}
        max_len = 0

        # Find the maximum size for a series inside our series dataset.
        # This is used later to compute the size of the actual plot (pdf).
        # While there gather the comparison value used to draw comparison line
        # between branches.
        for branch, results in branch_results.items():
            max_len = max([max_len, len(results)])
            if results:
                # Mean of the newest commit's sanitized (outlier-free) data.
                latest_values[branch] = mean(
                    sanitize_dataset(results[-1][1][b_type])[0]
                )
            else:
                latest_values[branch] = None

        for branch, results in branch_results.items():
            # Create a figure instance; widen it when many commits are shown.
            if max_len and max_len > 10:
                width = 0.16 * max_len
            else:
                width = 11.69

            x_data = list(range(len(results)))
            y_data = [c[1][b_type] for c in results]
            labels = [c[0][:8] for c in results]

            fig = plt.figure(figsize=(width, 8.27), dpi=100)
            plot_raw_value(branch, b_type, x_data, y_data, labels, latest_values)
            pdf_pages.savefig(fig)

            fig = plt.figure(figsize=(width, 8.27), dpi=100)
            # Use the mean of each sanitize dataset here, we do not care for
            # variance for ratio. At least not yet.
            y_data = [mean(sanitize_dataset(c[1][b_type])[0]) for c in results]
            plot_ratio(branch, b_type, x_data, y_data, labels, latest_values)
            pdf_pages.savefig(fig)

    pdf_pages.close()
+
+
def launch_jobs(branches, git_path, wait_for_completion, debug):
    """
    Launch a LAVA job for every commit that has no benchmark result yet.
    """
    client = get_client()
    for branch, cutoff in branches.items():
        commits = get_git_log(branch, cutoff, git_path)

        with tempfile.TemporaryDirectory() as workdir:
            for commit in commits:
                existing_results, _ = get_benchmark_results(client, commit, workdir)
                if existing_results:
                    # Already benchmarked; nothing to submit.
                    continue
                lava_submit.submit(
                    commit, wait_for_completion=wait_for_completion, debug=debug
                )
+
+
def main():
    """
    Parse arguments, then launch jobs and/or generate the report.

    Return 0 on success, 1 when the repository path does not exist.
    """
    # Branch -> cutoff commit: only commits after the cutoff are considered.
    bt_branches = {
        "master": "31976fe2d70a8b6b7f8b31b9e0b3bc004d415575",
        "stable-2.0": "07f585356018b4ddfbd0e09c49a14e38977c6973",
        "stable-1.5": "49e98b837a5667130e0d1e062a6bd7985c7c4582",
    }

    parser = argparse.ArgumentParser(description="Babeltrace benchmark utility")
    parser.add_argument(
        "--generate-jobs", action="store_true", help="Generate and send jobs"
    )
    parser.add_argument(
        "--do-not-wait-on-completion",
        action="store_true",
        default=False,
        # The implicit string concatenation was missing its separating space
        # ("usefulfor"); also fix "spaming".
        help="Wait for the completion of each jobs sent. This is useful "
        "for the ci. Otherwise we could end up spamming the lava instance.",
    )
    parser.add_argument(
        "--generate-report",
        action="store_true",
        help="Generate graphs and save them to pdf",
    )
    parser.add_argument(
        "--report-name", default="report.pdf", help="The name of the pdf report."
    )
    parser.add_argument(
        "--debug", action="store_true", default=False, help="Do not send jobs to lava."
    )
    parser.add_argument(
        "--repo-path", help="The location of the git repo to use.", required=True
    )

    args = parser.parse_args()

    if not os.path.exists(args.repo_path):
        print("Repository location does not exist.")
        return 1

    if args.generate_jobs:
        print("Launching jobs for:")
        for branch, cutoff in bt_branches.items():
            print("\t Branch {} with cutoff {}".format(branch, cutoff))
        launch_jobs(
            bt_branches, args.repo_path, not args.do_not_wait_on_completion, args.debug
        )

    if args.generate_report:
        print("Generating pdf report ({}) for:".format(args.report_name))
        for branch, cutoff in bt_branches.items():
            print("\t Branch {} with cutoff {}".format(branch, cutoff))
        generate_graph(bt_branches, args.report_name, args.repo_path)

    return 0
+
+
def sanitize_dataset(dataset):
    """
    Split `dataset` into (inliers, outliers) using the 1.5 * IQR rule [1].
    Useful to get a representative mean without outliers.
    [1] https://en.wikipedia.org/wiki/Interquartile_range#Outliers

    Both returned lists preserve the original ordering of `dataset`.
    """
    q1, q3 = numpy.percentile(sorted(dataset), [25, 75])
    margin = 1.5 * (q3 - q1)
    lower_bound = q1 - margin
    upper_bound = q3 + margin

    inliers = [value for value in dataset if lower_bound <= value <= upper_bound]
    outliers = [
        value for value in dataset if not lower_bound <= value <= upper_bound
    ]
    return inliers, outliers
+
+
+if __name__ == "__main__":
+ sys.exit(main())
--- /dev/null
#!/bin/bash -exu
#
# Copyright (C) 2019 - Jonathan Rajotte-Julien <jonathan.rajotte-julien@efficios.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Drive the babeltrace benchmark: set up a throwaway virtualenv, launch the
# LAVA jobs, then generate the PDF report into $RESULTS_DIR.

SRC_DIR="$WORKSPACE/src/babeltrace"
SCRIPT_DIR="$WORKSPACE/src/lttng-ci"
RESULTS_DIR="$WORKSPACE/results"

REQUIREMENT_PATH="${SCRIPT_DIR}/scripts/babeltrace-benchmark/requirement.txt"
SCRIPT_PATH="${SCRIPT_DIR}/scripts/babeltrace-benchmark/benchmark.py"
VENV="$(mktemp -d)"
TMPDIR="${VENV}/tmp"

mkdir -p "$TMPDIR"
export TMPDIR

# The script runs with -e: without a trap, any failure would skip the final
# cleanup and leak the virtualenv (and its tmp dir) in /tmp.
trap 'rm -rf "$VENV"' EXIT

function setup_env ()
{
	mkdir -p "$RESULTS_DIR"
	virtualenv --python python3 "$VENV"
	set +u
	# shellcheck disable=SC1090
	. "${VENV}/bin/activate"
	set -u
	pip install -r "$REQUIREMENT_PATH"
}

function run_jobs ()
{
	python "$SCRIPT_PATH" --generate-jobs --repo-path "$SRC_DIR"
}

function generate_report ()
{
	python "$SCRIPT_PATH" --generate-report --repo-path "$SRC_DIR" --report-name "${RESULTS_DIR}/babeltrace-benchmark.pdf"
}

setup_env
run_jobs
generate_report
--- /dev/null
+#!/usr/bin/python3
+# Copyright (C) 2019 - Jonathan Rajotte Julien <jonathan.rajotte-julien@efficios.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import os
+import sys
+import time
+import xmlrpc.client
+
+from jinja2 import Environment, FileSystemLoader
+
+USERNAME = "lava-jenkins"
+HOSTNAME = "lava-master-02.internal.efficios.com"
+DEFAULT_KERNEL_COMMIT = "1a1a512b983108015ced1e7a7c7775cfeec42d8c"
+
+
def wait_on(server, jobid):
    """
    Block until the LAVA job leaves its active states, polling every 30s.

    Do not care for result. This is mostly to prevent flooding of lava with
    multiple jobs for the same commit hash. Jenkins is responsible for
    running only one job for job submissions.
    """
    active_states = ("Submitted", "Scheduling", "Scheduled", "Running")
    announced_running = False
    state = server.scheduler.job_state(jobid)["job_state"]
    while state in active_states:
        if state == "Running" and not announced_running:
            print("Job started running", flush=True)
            announced_running = True
        time.sleep(30)
        try:
            state = server.scheduler.job_state(jobid)["job_state"]
        except xmlrpc.client.ProtocolError:
            # Transient RPC failure: keep the last known state and retry.
            print("Protocol error, retrying", flush=True)
            continue
    print("Job ended with {} status.".format(state), flush=True)
+
+
def submit(
    commit, debug=False, kernel_commit=DEFAULT_KERNEL_COMMIT, wait_for_completion=True
):
    """
    Render the LAVA job template for `commit` and submit it.

    commit: babeltrace commit hash to benchmark.
    debug: when True, only print the rendered job; do not submit.
    kernel_commit: kernel build used to boot the test machine.
    wait_for_completion: when True, block until the LAVA job finishes.

    Return 0 on success (or debug), -1 on configuration/submission failure,
    None when a submitted job was waited on.
    """
    nfsrootfs = "https://obj.internal.efficios.com/lava/rootfs/rootfs_amd64_xenial_2018-12-05.tar.gz"
    kernel_url = "https://obj.internal.efficios.com/lava/kernel/{}.baremetal.bzImage".format(
        kernel_commit
    )
    modules_url = "https://obj.internal.efficios.com/lava/modules/linux/{}.baremetal.linux.modules.tar.gz".format(
        kernel_commit
    )

    lava_api_key = None
    if not debug:
        try:
            # Narrowed from a bare `except Exception`: a missing variable is
            # the only failure os.environ[...] raises here.
            lava_api_key = os.environ["LAVA2_JENKINS_TOKEN"]
        except KeyError as error:
            print(
                "LAVA2_JENKINS_TOKEN not found in the environment variable. Exiting...",
                error,
            )
            return -1

    # Render the job description from the template next to this script.
    jinja_loader = FileSystemLoader(os.path.dirname(os.path.realpath(__file__)))
    jinja_env = Environment(loader=jinja_loader, trim_blocks=True, lstrip_blocks=True)
    jinja_template = jinja_env.get_template("template_lava_job_bt_benchmark.jinja2")

    context = dict()
    context["kernel_url"] = kernel_url
    context["nfsrootfs_url"] = nfsrootfs
    context["modules_url"] = modules_url
    context["commit_hash"] = commit

    render = jinja_template.render(context)

    print("Job to be submitted:", flush=True)

    print(render, flush=True)

    if debug:
        return 0

    server = xmlrpc.client.ServerProxy(
        "http://%s:%s@%s/RPC2" % (USERNAME, lava_api_key, HOSTNAME)
    )

    # Retry transient RPC failures up to 10 times.
    jobid = None
    for attempt in range(10):
        try:
            jobid = server.scheduler.submit_job(render)
        except xmlrpc.client.ProtocolError:
            print(
                "Protocol error on submit, sleeping and retrying. Attempt #{}".format(
                    attempt
                ),
                flush=True,
            )
            time.sleep(5)
        else:
            break

    if jobid is None:
        # All attempts failed; previously execution fell through and raised
        # NameError on the unbound `jobid` below.
        print("Job submission failed after 10 attempts.", flush=True)
        return -1

    print("Lava jobid:{}".format(jobid), flush=True)
    print(
        "Lava job URL: http://lava-master-02.internal.efficios.com/scheduler/job/{}".format(
            jobid
        ),
        flush=True,
    )

    if not wait_for_completion:
        return 0

    wait_on(server, jobid)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Launch baremetal babeltrace test using Lava"
+ )
+ parser.add_argument("-c", "--commit", required=True)
+ parser.add_argument(
+ "-k", "--kernel-commit", required=False, default=DEFAULT_KERNEL_COMMIT
+ )
+ parser.add_argument("-d", "--debug", required=False, action="store_true")
+ args = parser.parse_args()
+ sys.exit(submit(args.kernel_commit, args.commit, args.debug))
--- /dev/null
+backcall==0.1.0
+certifi==2019.3.9
+cycler==0.10.0
+decorator==4.4.0
+gitdb2==2.0.5
+GitPython==2.1.11
+ipython==7.5.0
+ipython-genutils==0.2.0
+jedi==0.13.3
+Jinja2==2.10.1
+kiwisolver==1.1.0
+MarkupSafe==1.1.1
+matplotlib==3.1.0
+minio==4.0.17
+mpld3==0.3
+numpy==1.16.4
+parso==0.4.0
+pexpect==4.7.0
+pickleshare==0.7.5
+pkg-resources==0.0.0
+prompt-toolkit==2.0.9
+ptyprocess==0.6.0
+Pygments==2.4.2
+pyparsing==2.4.0
+python-dateutil==2.8.0
+python-magic==0.4.15
+pytz==2019.1
+s3cmd==2.0.2
+six==1.12.0
+smmap2==2.0.5
+traitlets==4.3.2
+urllib3==1.25.3
+wcwidth==0.1.7
--- /dev/null
# Jinja2 template rendered by lava_submit.py into a LAVA job definition.
device_type: x86
job_name: babeltrace_benchmark
timeouts:
  job:
    hours: 3
  action:
    hours: 3
  connection:
    minutes: 4
  connections:
    lava-test-shell:
      minutes: 4
priority: medium
visibility: public
context:
  extra_kernel_args: cpuidle.off=1
  extra_nfsroot_args: ",nfsvers=3 nfsrootdebug"

tags:
  - dev-sda1

actions:
  - deploy:
      timeout:
        minutes: 10
      to: tftp
      kernel:
        # Templated scalars are quoted so an empty or special-character
        # expansion cannot change how the YAML document parses.
        url: "{{ kernel_url }}"
        type: zimage
      modules:
        url: "{{ modules_url }}"
        compression: gz
      nfsrootfs:
        url: "{{ nfsrootfs_url }}"
        compression: gz

  - boot:
      timeout:
        minutes: 10
      method: ipxe
      commands: nfs
      auto_login:
        login_prompt: 'login:'
        username: root
        password_prompt: 'Password:'
        password: root
      prompts:
        - 'root@linaro-server:~#'

  - test:
      definitions:
        # Base setup of environment #
        - repository:
            metadata:
              format: Lava-Test Test Definition 1.0
              name: x86-env-setup
              description: "Basic environment setup for x86 board"
              os:
                - ubuntu
              devices:
                - x86
            run:
              steps:
                - chmod 755 /
                - systemctl start systemd-timesyncd
                - echo nameserver 172.18.0.12 > /etc/resolv.conf
                - ip a
                - mount /dev/sda /tmp
                - rm -rf /tmp/*
                - locale-gen en_US.UTF-8
                - apt-get update
                - apt-get upgrade
                - hash -r
                - sync
          from: inline
          name: x86-env-setup-inline
          path: inline/x86-env-setup.yaml
        - repository: https://github.com/lttng/lttng-ci.git
          from: git
          path: lava/benchmark/babeltrace/benchmark.yml
          name: babeltrace-benchmark
          params:
            COMMIT: "{{ commit_hash }}"
--- /dev/null
+#!/usr/bin/python3
+# Copyright (C) 2019 - Jonathan Rajotte Julien <jonathan.rajotte-julien@efficios.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import os
+import subprocess
+import tempfile
+import json
+from collections import defaultdict
+
+_METRIC = {
+ "User time (seconds)": float,
+ "System time (seconds)": float,
+ "Percent of CPU this job got": percent_parser,
+ "Elapsed (wall clock) time (h:mm:ss or m:ss)": wall_clock_parser,
+ "Average shared text size (kbytes)": int,
+ "Average unshared data size (kbytes)": int,
+ "Average stack size (kbytes)": int,
+ "Average total size (kbytes)": int,
+ "Maximum resident set size (kbytes)": int,
+ "Average resident set size (kbytes)": int,
+ "Major (requiring I/O) page faults": int,
+ "Minor (reclaiming a frame) page faults": int,
+ "Voluntary context switches": int,
+ "Involuntary context switches": int,
+ "Swaps": int,
+ "File system inputs": int,
+ "File system outputs": int,
+ "Socket messages sent": int,
+ "Socket messages received": int,
+ "Signals delivered": int,
+ "Page size (bytes)": int,
+}
+
+
def wall_clock_parser(value):
    """
    Parse /usr/bin/time wall clock value into seconds.

    Wall clock value is expressed in different formats depending on the
    actual elapsed time: "h:mm:ss" or "m:ss", optionally with a fractional
    part (e.g. "1:02.33"). Return 0.0 for unrecognized formats.
    """
    total = 0.0
    pos = value.find(".")
    # str.find returns -1 when absent: only strip a fractional part when a
    # dot is really present (the previous truthiness test treated -1 as
    # "found" and corrupted values without a dot).
    if pos != -1:
        total += float(value[pos:])
        value = value[:pos]

    v_split = value.split(":")
    if len(v_split) == 2:
        total += float(v_split[0]) * 60.0
        total += float(v_split[1]) * 1.0
    elif len(v_split) == 3:
        # One hour is 3600 seconds (the previous factor, 360, under-counted
        # hours by 10x).
        total += float(v_split[0]) * 3600.0
        total += float(v_split[1]) * 60.0
        total += float(v_split[2]) * 1.0
    else:
        return 0.0

    return total
+
+
def percent_parser(value):
    """
    Parse a /usr/bin/time CPU-percent value (e.g. "98%", or "?%" when the
    measurement was unavailable) into a number; 0 when nothing remains.
    """
    digits = value.replace("%", "").replace("?", "")
    return float(digits) if digits else 0
+
+
def parse(path, results):
    """
    Parse a /usr/bin/time -v report at `path` and append each recognized
    metric value to `results` (mapping of metric name -> list of values).
    Return `results`.
    """
    with open(path, "r") as data:
        for line in data:
            if line.rfind(":") == -1:
                continue
            # Split only at the last ": ": without maxsplit, a value that
            # itself contains ": " (e.g. the quoted command line) makes the
            # 2-tuple unpacking raise ValueError.
            key, value = line.lstrip().rsplit(": ", 1)
            if key in _METRIC:
                results[key].append(_METRIC[key](value))

    return results
+
+
def save(path, results):
    """
    Serialize `results` to `path` as pretty-printed json with sorted keys.
    """
    with open(path, "w") as out_file:
        json.dump(results, out_file, indent=4, sort_keys=True)
+
+
def run(command, iteration, output, stdout, stderr):
    """
    Run the command through /usr/bin/time `iteration` times, accumulate the
    parsed metrics, and save them as json to `output`.

    stdout/stderr: paths the benchmarked command's own output is appended to.
    """
    results = defaultdict(list)
    for i in range(iteration):
        # /usr/bin/time writes its report here; the handle is closed right
        # away so /usr/bin/time can reopen the path itself.
        time_stdout = tempfile.NamedTemporaryFile(delete=False)
        # We must delete this file later on.
        time_stdout.close()
        with open(stdout, "a+") as out, open(stderr, "a+") as err:
            cmd = "/usr/bin/time -v --output='{}' {}".format(time_stdout.name, command)
            ret = subprocess.run(cmd, shell=True, stdout=out, stderr=err)
            if ret.returncode != 0:
                # Best effort: report the failure but still parse whatever
                # /usr/bin/time managed to record.
                print("Iteration: {}, Command failed: {}".format(str(i), cmd))
        results = parse(time_stdout.name, results)
        os.remove(time_stdout.name)
    save(output, results)
+
+
def main():
    """
    Run /usr/bin/time N time and collect the result.

    The resulting json maps each /usr/bin/time metric label to the list of
    values observed across iterations, e.g.:
    {
        "User time (seconds)": [],
        "System time (seconds)": [],
        "Percent of CPU this job got": [],
        "Elapsed (wall clock) time (h:mm:ss or m:ss)": [],
        ...
    }
    (The previous docstring showed the metrics nested under a
    "/usr/bin/time" key; the output has no such wrapper.)
    """
    parser = argparse.ArgumentParser(
        description="Run command N time using /usr/bin/time and collect the statistics"
    )
    # Typo fix: "Where to same the result" -> "save".
    parser.add_argument("--output", help="Where to save the result", required=True)
    parser.add_argument("--command", help="The command to benchmark", required=True)
    parser.add_argument(
        "--iteration",
        type=int,
        default=5,
        # No longer required: required=True made the documented default
        # unreachable. Existing callers that pass --iteration still work.
        help="The number of iteration to run the command (default: 5)",
    )
    parser.add_argument(
        "--stdout",
        default="/dev/null",
        help="Where to append the stdout of each command (default: /dev/null)",
    )
    parser.add_argument(
        "--stderr",
        default=os.path.join(os.getcwd(), "stderr.out"),
        help="Where to append the stderr of each command (default: $CWD/stderr.out)",
    )

    args = parser.parse_args()
    run(args.command, args.iteration, args.output, args.stdout, args.stderr)
+
+
+if __name__ == "__main__":
+ main()