           send-to:
             - recipients
-## Templates
-- job-template:
-    name: baremetal_benchmarks_k{kversion}_l{lttngversion}
-    description: |
-      Runs baremetal kernel benchmarks over different combinations of kernel and lttng configurations.
-    project-type: freestyle
-    node: 'amd64'
-
-    <<: *system_tests_parameters_defaults
-
-    properties:
-      - build-discarder:
-          num-to-keep: 10
-      - throttle:
-          max-total: 2
-          option: 'category'
-          categories:
-            - 'baremetal-tests'
-
-    wrappers:
-      - workspace-cleanup
-      - timestamps
-      - ansicolor
-      - credentials-binding:
-          - text:
-              credential-id: jenkins_lava_key
-              variable: LAVA_JENKINS_TOKEN
-          - text:
-              credential-id: jenkins_lava2_key
-              variable: LAVA2_JENKINS_TOKEN
-          - file:
-              credential-id: system_tests_storage_key
-              variable: identity_file
-      - inject:
-          properties-content: |
-            BUILD_DEVICE=baremetal
-            LTTNG_VERSION={lttngversion}
-    scm:
-      - git:
-          url: https://github.com/lttng/lttng-ci
-          basedir: src/lttng-ci/
-
-    builders:
-      - shell: !include-raw-escape: scripts/system-tests/generate-properties-master.sh
-      - shell: !include-raw-escape: scripts/system-tests/inject-ssh-commands.sh
-      - trigger-builds:
-          - project: "build_kernel_PARAM"
-            property-file: 'properties.txt'
-            block: true
-      - inject:
-          properties-file: properties.txt
-      - shell: !include-raw-escape: scripts/system-tests/run-baremetal-benchmarks.sh
-      - shell: !include-raw-escape: scripts/system-tests/summarize-results.sh
-
-    publishers:
-      - archive:
-          artifacts: '*.png,*.csv'
-          stable: true
-          do-not-fingerprint: true
-      - image-gallery:
-          - gallery-type: archived-images-gallery
-            title: Results
-            includes: '*.png'
-      - workspace-cleanup
-
 - job-template:
     name: vm_tests_k{kversion}_l{lttngversion}
     description: |
       - stable-2.12
     jobs:
       - 'vm_tests_k{kversion}_l{lttngversion}'
-      - 'baremetal_benchmarks_k{kversion}_l{lttngversion}'
       - 'baremetal_tests_k{kversion}_l{lttngversion}'
 # Test against particular linux version.
       - stable-2.12
     jobs:
       - 'vm_tests_k{kversion}_l{lttngversion}'
-      - 'baremetal_benchmarks_k{kversion}_l{lttngversion}'
       - 'baremetal_tests_k{kversion}_l{lttngversion}'
 - project:
     name: system-tests-vm-only
     test_type:
       - vm_tests
       - baremetal_tests
-      - baremetal_benchmarks
     jobs:
       - 'build_kernel_PARAM'
       - 'system_ALL_{test_type}_trigger'
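For context, Jenkins Job Builder expands each job-template above once per combination of axis values declared by the enclosing project, substituting {kversion} and {lttngversion} into the job names. A minimal sketch of that expansion, with assumed axis values:

    # Sketch of JJB template expansion; these axis values are assumptions,
    # not the ones configured in this repository.
    for kversion in ('4.4', 'master'):
        for lttngversion in ('stable-2.12', 'master'):
            for template in ('vm_tests_k{kversion}_l{lttngversion}',
                             'baremetal_tests_k{kversion}_l{lttngversion}'):
                print(template.format(kversion=kversion, lttngversion=lttngversion))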
--- a/lava/system-tests/failing-close.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-syscall-failing-close
-  description: "Perform syscall tracing benchmark of failing close"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy curl
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh failing-close sys_close"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_close.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_close.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_close.csv "results/${JENKINS_BUILD_ID}/processed_results_close.csv"
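The six sibling definitions removed below differ from this one only in the benchmark name passed to run.sh, the suffix of the result files, and small setup details (curl is only installed in some of them, and the lttng-test-filter case additionally runs modprobe lttng-test). A hypothetical generator, shown only to make the shared skeleton explicit; the names and paths come from the hunks below, the elided steps do not:

    # Hypothetical: emit the skeleton shared by the seven removed LAVA
    # definitions. Setup and upload steps are elided for brevity.
    import yaml  # PyYAML

    CASES = {
        # testcase -> (arguments to run.sh, result-file suffix)
        'failing-close': ('failing-close sys_close', 'close'),
        'failing-ioctl': ('failing-ioctl sys_ioctl', 'ioctl'),
        'failing-open-efault': ('failing-open-efault sys_open', 'open_efault'),
        'failing-open-enoent': ('failing-open-enoent sys_open', 'open_enoent'),
        'lttng-test-filter': ('lttng-test-filter lttng_test_filter_event', 'lttng_test_filter'),
        'raw-syscall-getpid': ('raw-syscall-getpid sys_getpid', 'raw_syscall_getpid'),
        'success-dup-close': ('success-dup-close sys_close,sys_dup', 'dup_close'),
    }

    for case, (args, suffix) in CASES.items():
        definition = {
            'metadata': {
                'format': 'Lava-Test Test Definition 1.0',
                'name': 'benchmark-{}'.format(case),
            },
            'params': {'JENKINS_BUILD_ID': 'invalid_jenkins_build_id'},
            'run': {'steps': [
                'lava-test-case build-benchmarks --shell "make"',
                'lava-test-case run-benchmarks --shell "./run.sh {}"'.format(args),
                'mv ./processed_results.csv ./processed_results_{}.csv'.format(suffix),
            ]},
        }
        with open('{}.yml'.format(case), 'w') as f:
            yaml.dump(definition, f, sort_keys=False)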
--- a/lava/system-tests/failing-ioctl.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-syscall-failing-ioctl
-  description: "Perform syscall tracing benchmark of failing ioctl"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh failing-ioctl sys_ioctl"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_ioctl.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_ioctl.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_ioctl.csv "results/${JENKINS_BUILD_ID}/processed_results_ioctl.csv"
--- a/lava/system-tests/failing-open-efault.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-syscall-failing-open-efault
-  description: "Perform syscall tracing benchmark of failing open-efault"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh failing-open-efault sys_open"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_open_efault.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_efault.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_open_efault.csv "results/${JENKINS_BUILD_ID}/processed_results_open_efault.csv"
--- a/lava/system-tests/failing-open-enoent.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-syscall-failing-open-enoent
-  description: "Perform syscall tracing benchmark of failing open-enoent"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh failing-open-enoent sys_open"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_open_enoent.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_enoent.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_open_enoent.csv "results/${JENKINS_BUILD_ID}/processed_results_open_enoent.csv"
--- a/lava/system-tests/lttng-test-filter.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-lttng-test-filter
-  description: "Perform syscall tracing benchmark of the lttng-test-filter"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy curl
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - modprobe lttng-test
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh lttng-test-filter lttng_test_filter_event"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_lttng_test_filter.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_lttng_test_filter.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_lttng_test_filter.csv "results/${JENKINS_BUILD_ID}/processed_results_lttng_test_filter.csv"
--- a/lava/system-tests/raw-syscall-getpid.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-raw-syscall-getpid
-  description: "Perform syscall tracing benchmark of the raw syscall getpid"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh raw-syscall-getpid sys_getpid"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_raw_syscall_getpid.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_raw_syscall_getpid.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_raw_syscall_getpid.csv "results/${JENKINS_BUILD_ID}/processed_results_raw_syscall_getpid.csv"
--- a/lava/system-tests/success-dup-close.yml
+++ /dev/null
-metadata:
-  format: Lava-Test Test Definition 1.0
-  name: benchmark-syscall-success-dup-close
-  description: "Perform syscall tracing benchmark of successful dup and close"
-params:
-  JENKINS_BUILD_ID: "invalid_jenkins_build_id"
-run:
-  steps:
-    - apt install -y python3-pandas python3-numpy curl
-    - git clone https://github.com/lttng/lttng-ci ci
-    - export TMPDIR="/tmp"
-    - mkdir -p coredump
-    - echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
-    - ulimit -c unlimited
-    - source /root/lttngvenv/activate
-    - export BENCHMARK_DIR=$(mktemp --directory)/bm
-    - git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
-    - pushd $BENCHMARK_DIR
-    - lava-test-case build-benchmarks --shell "make"
-    - lava-test-case run-benchmarks --shell "./run.sh success-dup-close sys_close,sys_dup"
-    - popd
-    - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
-    - mv ./processed_results.csv ./processed_results_dup_close.csv
-    - tar czf coredump.tar.gz coredump
-    - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
-    - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_dup_close.csv"
-    - ./ci/lava/upload_artifact.sh processed_results_dup_close.csv "results/${JENKINS_BUILD_ID}/processed_results_dup_close.csv"
--- a/scripts/system-tests/generate-plots.py
+++ /dev/null
-# Copyright (C) 2017 - Francis Deslauriers <francis.deslauriers@efficios.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-
-import os, sys
-import numpy as np
-import pandas as pd
-
-# Set Matplotlib to use the PNG non-interactive backend.
-import matplotlib as mpl
-mpl.use('Agg')
-
-import matplotlib.pyplot as plt
-from matplotlib.ticker import MaxNLocator
-from cycler import cycler
-from collections import OrderedDict
-
-def rename_cols(df):
-    new_cols = {'baseline_1thr_peritermean': 'basel_1thr',
-                'baseline_2thr_peritermean': 'basel_2thr',
-                'baseline_4thr_peritermean': 'basel_4thr',
-                'baseline_8thr_peritermean': 'basel_8thr',
-                'baseline_16thr_peritermean': 'basel_16thr',
-                'lttng_1thr_peritermean': 'lttng_1thr',
-                'lttng_2thr_peritermean': 'lttng_2thr',
-                'lttng_4thr_peritermean': 'lttng_4thr',
-                'lttng_8thr_peritermean': 'lttng_8thr',
-                'lttng_16thr_peritermean': 'lttng_16thr',
-                'baseline_1thr_periterstdev': 'basel_1thr_stdev',
-                'baseline_2thr_periterstdev': 'basel_2thr_stdev',
-                'baseline_4thr_periterstdev': 'basel_4thr_stdev',
-                'baseline_8thr_periterstdev': 'basel_8thr_stdev',
-                'baseline_16thr_periterstdev': 'basel_16thr_stdev',
-                'lttng_1thr_periterstdev': 'lttng_1thr_stdev',
-                'lttng_2thr_periterstdev': 'lttng_2thr_stdev',
-                'lttng_4thr_periterstdev': 'lttng_4thr_stdev',
-                'lttng_8thr_periterstdev': 'lttng_8thr_stdev',
-                'lttng_16thr_periterstdev': 'lttng_16thr_stdev'
-                }
-    df.rename(columns=new_cols, inplace=True)
-    return df
-
-def convert_us_to_ns(df):
-    cols = [col for col in df.columns if 'periter' in col]
-    df[cols] = df[cols].apply(lambda x: x * 1000)
-    return df
-
-def create_plot(df, graph_type):
-    # Map each test configuration to its respective color.
-    conf_to_color = OrderedDict([
-        ('basel_1thr', 'lightcoral'),
-        ('lttng_1thr', 'red'),
-        ('basel_2thr', 'gray'),
-        ('lttng_2thr', 'black'),
-        ('basel_4thr', 'chartreuse'),
-        ('lttng_4thr', 'forestgreen'),
-        ('basel_8thr', 'deepskyblue'),
-        ('lttng_8thr', 'mediumblue'),
-        ('basel_16thr', 'orange'),
-        ('lttng_16thr', 'saddlebrown')])
-
-    # Create a list of configurations for each of the subplots.
-    baseline = [x for x in conf_to_color.keys() if 'basel' in x]
-    lttng = [x for x in conf_to_color.keys() if 'lttng' in x]
-    one_thr = [x for x in conf_to_color.keys() if '_1thr' in x]
-    two_thr = [x for x in conf_to_color.keys() if '_2thr' in x]
-    four_thr = [x for x in conf_to_color.keys() if '_4thr' in x]
-    eight_thr = [x for x in conf_to_color.keys() if '_8thr' in x]
-    sixteen_thr = [x for x in conf_to_color.keys() if '_16thr' in x]
-
-    plots = [baseline, lttng, one_thr, two_thr, four_thr, eight_thr, sixteen_thr]
-
-    title = 'Meantime per event for {} testcase'.format(graph_type)
-
-    # Create an axes object for each subplot.
-    f, arrax = plt.subplots(len(plots), sharex=True, figsize=(16, 25))
-    f.suptitle(title, fontsize=20)
-
-    for (ax, data_cols) in zip(arrax, plots):
-        curr_df = df[data_cols]
-
-        stdev_cols = ['{}_stdev'.format(x) for x in data_cols]
-        # Extract the color of each configuration.
-        colors = [conf_to_color[x] for x in data_cols]
-
-        # Set the color cycler for this plot.
-        ax.set_prop_cycle(cycler('color', colors))
-
-        # Plot each line and its error bars.
-        for (data, stdev) in zip(data_cols, stdev_cols):
-            ax.errorbar(x=df.index.values, y=df[data].values, yerr=df[stdev].values, marker='o')
-
-        ax.set_ylim(0)
-        ax.grid()
-        ax.set_xlabel('Jenkins Build ID')
-        ax.set_ylabel('Meantime per event [us]')
-
-        ax.xaxis.set_major_locator(MaxNLocator(integer=True, nbins=30))
-
-        ax.legend(prop={'family': 'monospace'},
-                  labels=curr_df.columns.values, bbox_to_anchor=(1.2, 1))
-
-    plt.subplots_adjust(top=0.95)
-    plt.savefig('{}.png'.format(graph_type), bbox_inches='tight')
-
-# Write a file that contains the commit ids of all configurations shown in the
-# plots.
-def create_metadata_file(res_dir):
-    list_ = []
-    for dirname, dirnames, res_files in os.walk('./' + res_dir):
-        if len(dirnames) > 0:
-            continue
-        try:
-            metadata = pd.read_csv(os.path.join(dirname, 'metadata.csv'))
-        except Exception:
-            print('Omitting run {} because metadata.csv is missing'.format(dirname))
-            continue
-        list_.append(metadata)
-
-    df = pd.concat(list_, sort=True)
-    df.index = df.build_id
-    df.sort_index(inplace=True)
-    df.to_csv('metadata.csv', index=False)
-
-# Iterate over a result directory and create the plots for the different
-# testcases.
-def create_plots(res_dir):
-    df = pd.DataFrame()
-    metadata_df = pd.DataFrame()
-    list_ = []
-    for dirname, dirnames, res_files in os.walk('./' + res_dir):
-        if len(dirnames) > 0:
-            continue
-        try:
-            metadata = pd.read_csv(os.path.join(dirname, 'metadata.csv'))
-        except Exception:
-            print('Omitting run {} because metadata.csv is missing'.format(dirname))
-            continue
-
-        for res in res_files:
-            if res == 'metadata.csv':
-                continue
-            tmp = pd.read_csv(os.path.join(dirname, res))
-            # Use the build id as the index of the dataframe for filtering.
-            tmp.index = metadata.build_id
-            # Add the testcase name to the row for later filtering.
-            tmp['testcase'] = res.split('.')[0]
-            list_.append(tmp)
-
-    df = pd.concat(list_, sort=True)
-    df = convert_us_to_ns(df)
-    df = rename_cols(df)
-    df.sort_index(inplace=True)
-
-    # Go over the entire dataframe by testcase and create a plot for each type.
-    for testcase in df.testcase.unique():
-        df_testcase = df.loc[df['testcase'] == testcase]
-        create_plot(df=df_testcase, graph_type=testcase)
-
-def main():
-    res_path = sys.argv[1]
-    create_plots(os.path.join(res_path))
-    create_metadata_file(os.path.join(res_path))
-
-if __name__ == '__main__':
-    main()
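Taken together, rename_cols, convert_us_to_ns and create_plot expect one row per Jenkins build and one mean/stdev column pair per (tracer, thread count) configuration. A hypothetical smoke test, assuming the three functions above are in scope, that renders one figure from synthetic data:

    # Hypothetical smoke test for the removed helpers, on synthetic data.
    import itertools
    import numpy as np
    import pandas as pd

    data = {}
    for tracer, thr in itertools.product(('baseline', 'lttng'), (1, 2, 4, 8, 16)):
        data['{}_{}thr_peritermean'.format(tracer, thr)] = np.random.uniform(0.5, 2.0, 3)
        data['{}_{}thr_periterstdev'.format(tracer, thr)] = np.random.uniform(0.01, 0.1, 3)
    df = pd.DataFrame(data, index=[100, 101, 102])  # placeholder Jenkins build ids

    df = rename_cols(convert_us_to_ns(df))
    create_plot(df, 'failing-close')  # writes failing-close.png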
--- a/scripts/system-tests/lava2-submit.py
+++ b/scripts/system-tests/lava2-submit.py
 class TestType:
     """ Enum like for test type """
-    baremetal_benchmarks = 1
-    baremetal_tests = 2
-    kvm_tests = 3
+    baremetal_tests = 1
+    kvm_tests = 2
     values = {
-        'baremetal-benchmarks': baremetal_benchmarks,
         'baremetal-tests': baremetal_tests,
         'kvm-tests': kvm_tests,
     }
     return (passed_tests, failed_tests)
-def fetch_benchmark_results(build_id):
-    """
-    Get the benchmark results from the objstore and save them
-    as CSV files locally.
-    """
-    testcases = [
-        'processed_results_close.csv',
-        'processed_results_ioctl.csv',
-        'processed_results_open_efault.csv',
-        'processed_results_open_enoent.csv',
-        'processed_results_dup_close.csv',
-        'processed_results_raw_syscall_getpid.csv',
-        'processed_results_lttng_test_filter.csv',
-    ]
-    for testcase in testcases:
-        url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
-        print('Fetching {}'.format(url))
-        urlretrieve(url, testcase)
-
-
 def print_test_output(server, job):
     """
     Parse the attachment of the testcase to fetch the stdout of the test suite
     test_type = TestType.values[args.type]
-    if test_type in [TestType.baremetal_benchmarks, TestType.baremetal_tests]:
+    if test_type is TestType.baremetal_tests:
         device_type = DeviceType.x86
     else:
         device_type = DeviceType.kvm
     if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
         print_test_output(server, jobid)
-    elif test_type is TestType.baremetal_benchmarks:
-        fetch_benchmark_results(args.build_id)
     passed, failed = check_job_all_test_cases_state_count(server, jobid)
     print('With {} passed and {} failed Lava test cases.'.format(passed, failed))
--- a/scripts/system-tests/run-baremetal-benchmarks.sh
+++ /dev/null
-#!/bin/bash -xeu
-# Copyright (C) 2016 - Francis Deslauriers <francis.deslauriers@efficios.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-echo 'At this point, we built the modules and kernel if we needed to.'
-echo 'We can now launch the lava job using those artefacts'
-
-venv=$(mktemp -d)
-virtualenv -p python3 "$venv"
-set +eu
-source "${venv}/bin/activate"
-set -eu
-pip install pyyaml Jinja2
-
-python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava2-submit.py \
-    -t baremetal-benchmarks \
-    -lv "$LTTNG_VERSION" \
-    -j "$JOB_NAME" \
-    -k "$S3_URL_KERNEL_IMAGE" \
-    -lm "$S3_URL_LTTNG_MODULES" \
-    -tu "$LTTNG_TOOLS_REPO" \
-    -tc "$LTTNG_TOOLS_COMMIT_ID" \
-    -id "$BUILD_TAG"
-set +eu
-deactivate
-set -eu
-rm -rf "$venv"
-
-# Create a results folder for this job
-RESULT_STORAGE_FOLDER="$BASE_STORAGE_FOLDER/benchmark-results/$JOB_NAME/$BUILD_NUMBER"
-$SSH_COMMAND "$STORAGE_USER@$STORAGE_HOST" mkdir -p "$RESULT_STORAGE_FOLDER"
-
-# Create a metadata file for this job containing the build_id, timestamp and the commit ids
-TIMESTAMP=$(/bin/date --iso-8601=seconds)
-LTTNG_CI_COMMIT_ID="$(git --git-dir="$LTTNG_CI_PATH"/.git/ --work-tree="$LTTNG_CI_PATH" rev-parse --short HEAD)"
-
-echo "build_id,timestamp,kernel_commit,modules_commit,tools_commit,ci_commit" > metadata.csv
-echo "$BUILD_NUMBER,$TIMESTAMP,$KERNEL_COMMIT_ID,$LTTNG_MODULES_COMMIT_ID,$LTTNG_TOOLS_COMMIT_ID,$LTTNG_CI_COMMIT_ID" >> metadata.csv
-
-# Copy the result files for each benchmark and metadata on storage server
-$SCP_COMMAND ./processed_results_close.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/close.csv"
-$SCP_COMMAND ./processed_results_ioctl.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/ioctl.csv"
-$SCP_COMMAND ./processed_results_open_efault.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/open-efault.csv"
-$SCP_COMMAND ./processed_results_open_enoent.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/open-enoent.csv"
-$SCP_COMMAND ./processed_results_dup_close.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/dup-close.csv"
-$SCP_COMMAND ./processed_results_lttng_test_filter.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/lttng-test-filter.csv"
-$SCP_COMMAND ./processed_results_raw_syscall_getpid.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/raw_syscall_getpid.csv"
-$SCP_COMMAND ./metadata.csv "$STORAGE_USER@$STORAGE_HOST:$RESULT_STORAGE_FOLDER/metadata.csv"
--- a/scripts/system-tests/summarize-results.sh
+++ /dev/null
-#!/bin/bash -xeu
-# Copyright (C) 2017 - Francis Deslauriers <francis.deslauriers@efficios.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-$SCP_COMMAND -r "$STORAGE_USER@$STORAGE_HOST:$BASE_STORAGE_FOLDER/benchmark-results/$JOB_NAME/" ./plot-data/
-
-PYTHON3="python3"
-
-PYENV_HOME=$WORKSPACE/.pyenv/
-
-# Delete previously built virtualenv
-if [ -d "$PYENV_HOME" ]; then
-    rm -rf "$PYENV_HOME"
-fi
-
-# Create virtualenv and install necessary packages
-virtualenv -p $PYTHON3 "$PYENV_HOME"
-
-set +ux
-. "$PYENV_HOME/bin/activate"
-set -ux
-
-pip install pandas
-pip install matplotlib
-
-python3 "$LTTNG_CI_PATH"/scripts/system-tests/generate-plots.py ./plot-data/
 }
 } else if (triggerJobName.contains("baremetal_tests")) {
     jobType = 'baremetal_tests';
-} else if (triggerJobName.contains("baremetal_benchmarks")) {
-    jobType = 'baremetal_benchmarks';
 }
 // Hashmap containing all the jobs, their configuration (commit id, etc.) and
       from: inline
       name: x86-env-setup-inline
       path: inline/x86-env-setup.yaml
-    {% if test_type == TestType.baremetal_benchmarks %}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/failing-close.yml
-      name: failing-close
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/failing-ioctl.yml
-      name: failing-ioctl
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/failing-open-efault.yml
-      name: failing-open-efault
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/success-dup-close.yml
-      name: success-dup-close
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/raw-syscall-getpid.yml
-      name: raw-syscall-getpid
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/failing-open-enoent.yml
-      name: failing-open-enoent
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    - repository: https://github.com/lttng/lttng-ci.git
-      from: git
-      path: lava/system-tests/lttng-test-filter.yml
-      name: lttng-test-filter
-      params:
-        JENKINS_BUILD_ID: {{ jenkins_build_id }}
-    {% elif test_type == TestType.baremetal_tests %}
+    {% if test_type == TestType.baremetal_tests %}
     - repository: https://github.com/lttng/lttng-ci.git
       from: git
       path: lava/system-tests/perf-tests.yml