name: lttng-destructive-tests
description: "Run root destructive test suite"
install:
+ deps:
+ - curl
+ git-repos:
+ - url: https://github.com/lttng/lttng-ci
+ destination: ci
+ branch: master
steps:
- export TMPDIR="/tmp"
- - cd
- systemctl stop systemd-timesyncd.service
- ulimit -c unlimited
- mkdir -p coredump
- echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
+params:
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
run:
steps:
- source /root/lttngvenv/activate
- - cd /root/lttngvenv/src/lttng-tools
+ - pushd /root/lttngvenv/src/lttng-tools
- lava-test-case build-test-suite --shell "make"
- export LTTNG_ENABLE_DESTRUCTIVE_TESTS="will-break-my-system"
- cd tests
#Need to check if the file is present for branches where the testcase was not backported
- lava-test-case run-tests --shell "if [ -e root_destructive_tests ]; then prove --verbose --merge --exec '' - < root_destructive_tests; else echo 'root_destructive_tests not found'; fi"
- - cd
+ - popd
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-tests coredump.tar.gz
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
name: benchmark-syscall-failing-close
description: "Perform syscall tracing benchmark of failing close"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- python3-pandas
- python3-numpy
+ - curl
git-repos:
- url: https://github.com/lttng/lttng-ci
destination: ci
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh failing-close sys_close"
- - lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_close.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_close.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_close.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_close.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_close.csv "results/${JENKINS_BUILD_ID}/processed_results_close.csv"
name: benchmark-syscall-failing-ioctl
description: "Perform syscall tracing benchmark of failing ioctl"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- python3-pandas
- python3-numpy
+ - curl
git-repos:
- url: https://github.com/lttng/lttng-ci
destination: ci
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh failing-ioctl sys_ioctl"
- - lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_ioctl.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_ioctl.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_ioctl.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_ioctl.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_ioctl.csv "results/${JENKINS_BUILD_ID}/processed_results_ioctl.csv"
name: benchmark-syscall-failing-open-efault
description: "Perform syscall tracing benchmark of failing open-efault"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- python3-pandas
- python3-numpy
+ - curl
git-repos:
- url: https://github.com/lttng/lttng-ci
destination: ci
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh failing-open-efault sys_open"
- - lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_open_efault.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_open_efault.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_open_efault.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_efault.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_open_efault.csv "results/${JENKINS_BUILD_ID}/processed_results_open_efault.csv"
name: benchmark-syscall-failing-open-enoent
description: "Perform syscall tracing benchmark of failing open-enoent"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- python3-pandas
- python3-numpy
+ - curl
git-repos:
- url: https://github.com/lttng/lttng-ci
destination: ci
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh failing-open-enoent sys_open"
- - lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_open_enoent.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_open_enoent.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_open_enoent.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_open_enoent.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_open_enoent.csv "results/${JENKINS_BUILD_ID}/processed_results_open_enoent.csv"
format: Lava-Test Test Definition 1.0
name: lttng-kernel-test
description: "Run kernel test suite"
+params:
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
+
install:
+ deps:
+ - curl
+ git-repos:
+ - url: https://github.com/lttng/lttng-ci
+ destination: ci
+ branch: master
steps:
- export TMPDIR="/tmp"
- - cd
- ulimit -c unlimited
- mkdir -p coredump
- echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
run:
steps:
- source /root/lttngvenv/activate
- - cd /root/lttngvenv/src/lttng-tools
+ - pushd /root/lttngvenv/src/lttng-tools
- lava-test-case build-test-suite --shell "make"
- cd tests
- lava-test-case run-tests --shell "prove --verbose --merge --exec '' - < root_regression"
- - cd
+ - popd
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-tests coredump.tar.gz
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
name: benchmark-lttng-test-filter
description: "Perform syscall tracing benchmark of the lttng-test-filter"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- python3-pandas
- python3-numpy
+ - curl
git-repos:
- url: https://github.com/lttng/lttng-ci
destination: ci
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- modprobe lttng-test
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh lttng-test-filter lttng_test_filter_event"
- - lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_lttng_test_filter.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_lttng_test_filter.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_lttng_test_filter.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_lttng_test_filter.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_lttng_test_filter.csv "results/${JENKINS_BUILD_ID}/processed_results_lttng_test_filter.csv"
format: Lava-Test Test Definition 1.0
name: lttng-perf-tests
description: "Run perf regression test suite"
+params:
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- libpfm4-dev
+ - curl
+ git-repos:
+ - url: https://github.com/lttng/lttng-ci
+ destination: ci
+ branch: master
steps:
- export TMPDIR="/tmp"
- - cd
- ulimit -c unlimited
- mkdir -p coredump
- echo "$(pwd)/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
run:
steps:
- source /root/lttngvenv/activate
- - cd /root/lttngvenv/src/lttng-tools
+ - pushd /root/lttngvenv/src/lttng-tools
- lava-test-case build-test-suite --shell "make"
- cd tests
#Need to check if the file is present for branches where the testcase was not backported
- lava-test-case run-tests --shell "if [ -e perf_regression ]; then prove --verbose --merge --exec '' - < perf_regression; else echo 'perf_regression not found'; fi"
- - cd
+ - popd
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-tests coredump.tar.gz
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
name: benchmark-raw-syscall-getpid
description: "Perform syscall tracing benchmark of the raw syscall getpid"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh raw-syscall-getpid sys_getpid"
- lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_raw_syscall_getpid.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_raw_syscall_getpid.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_raw_syscall_getpid.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_raw_syscall_getpid.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_raw_syscall_getpid.csv "results/${JENKINS_BUILD_ID}/processed_results_raw_syscall_getpid.csv"
name: benchmark-syscall-success-dup-close
description: "Perform syscall tracing benchmark of successful dup and close"
params:
- JENKINS_JOBNAME: "default jobname"
+ JENKINS_BUILD_ID: "invalid_jenkins_build_id"
install:
deps:
- python3-pandas
- python3-numpy
+ - curl
git-repos:
- url: https://github.com/lttng/lttng-ci
destination: ci
- source /root/lttngvenv/activate
- export BENCHMARK_DIR=$(mktemp --directory)/bm
- git clone https://github.com/frdeso/syscall-bench-it.git $BENCHMARK_DIR
- - cd $BENCHMARK_DIR
+ - pushd $BENCHMARK_DIR
- lava-test-case build-benchmarks --shell "make"
- lava-test-case run-benchmarks --shell "./run.sh success-dup-close sys_close,sys_dup"
- lava-test-case-attach run-benchmarks "./results.csv"
- - cd -
- - cd ci
- - python3 ./scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
- - mv ./processed_results.csv ../processed_results_dup_close.csv
- - cd -
+ - popd
+ - python3 ./ci/scripts/system-tests/parse-results.py $BENCHMARK_DIR/results.csv
+ - mv ./processed_results.csv ./processed_results_dup_close.csv
- tar czf coredump.tar.gz coredump
- - lava-test-case-attach run-benchmarks coredump.tar.gz
- - lava-test-case-attach run-benchmarks "./processed_results_dup_close.csv"
+ - ./ci/lava/upload_artifact.sh coredump.tar.gz "results/${JENKINS_BUILD_ID}/${TESTRUN_ID}-coredump.tar.gz"
+ - ./ci/lava/upload_artifact.sh "${BENCHMARK_DIR}/results.csv" "results/${JENKINS_BUILD_ID}/results_dup_close.csv"
+ - ./ci/lava/upload_artifact.sh processed_results_dup_close.csv "results/${JENKINS_BUILD_ID}/processed_results_dup_close.csv"
--- /dev/null
+#!/bin/bash -eux
+# Copyright (C) 2018 - Jonathan Rajotte-Julien <jonathan.rajotte-julien@efficios.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+bucket=lava
+file=$1
+#Path must include the file name
+path=$2
+
+host=obj.internal.efficios.com
+s3_k='jenkins'
+s3_s='echo123456'
+
+resource="/${bucket}/${path}"
+content_type="application/octet-stream"
+date=`date -R`
+_signature="PUT\n\n${content_type}\n${date}\n${resource}"
+signature=`echo -en ${_signature} | openssl sha1 -hmac ${s3_s} -binary | base64`
+
+curl -v -X PUT -T "${file}" \
+ -H "Host: $host" \
+ -H "Date: ${date}" \
+ -H "Content-Type: ${content_type}" \
+ -H "Authorization: AWS ${s3_k}:${signature}" \
+ https://$host${resource}
import sys
import time
import xmlrpc.client
+from urllib.parse import urljoin
+from urllib.request import urlretrieve
from collections import OrderedDict
from enum import Enum
USERNAME = 'frdeso'
HOSTNAME = 'lava-master.internal.efficios.com'
SCP_PATH = 'scp://jenkins-lava@storage.internal.efficios.com'
+OBJSTORE_URL = "https://obj.internal.efficios.com/lava/results/"
class TestType(Enum):
baremetal_benchmarks=1
# Get the benchmark results from the lava bundle
# save them as CSV files localy
-def fetch_benchmark_results(server, job):
- content = get_job_bundle_content(server, job)
+def fetch_benchmark_results(build_id):
testcases = ['processed_results_close.csv',
'processed_results_ioctl.csv',
'processed_results_open_efault.csv',
'processed_results_dup_close.csv',
'processed_results_raw_syscall_getpid.csv',
'processed_results_lttng_test_filter.csv']
-
- # The result bundle is a large JSON containing the results of every testcase
- # of the LAVA job as well as the files that were attached during the run.
- # We need to iterate over this JSON to get the base64 representation of the
- # benchmark results produced during the run.
- for run in content['test_runs']:
- # We only care of the benchmark testcases
- if 'benchmark-' in run['test_id']:
- if 'test_results' in run:
- for res in run['test_results']:
- if 'attachments' in res:
- for a in res['attachments']:
- # We only save the results file
- if a['pathname'] in testcases:
- with open(a['pathname'],'wb') as f:
- # Convert the b64 representation of the
- # result file and write it to a file
- # in the current working directory
- f.write(base64.b64decode(a['content']))
+ for testcase in testcases:
+ url = urljoin(OBJSTORE_URL, "{:s}/{:s}".format(build_id, testcase))
+        urlretrieve(url, testcase)
# Parse the attachment of the testcase to fetch the stdout of the test suite
def print_test_output(server, job):
])
return command
-def get_baremetal_benchmarks_cmd():
+def get_baremetal_benchmarks_cmd(build_id):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-close.yml'
+ 'testdef': 'lava/system-tests/failing-close.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-ioctl.yml'
+ 'testdef': 'lava/system-tests/failing-ioctl.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-open-efault.yml'
+ 'testdef': 'lava/system-tests/failing-open-efault.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/success-dup-close.yml'
+ 'testdef': 'lava/system-tests/success-dup-close.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/raw-syscall-getpid.yml'
+ 'testdef': 'lava/system-tests/raw-syscall-getpid.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/failing-open-enoent.yml'
+ 'testdef': 'lava/system-tests/failing-open-enoent.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/lttng-test-filter.yml'
+ 'testdef': 'lava/system-tests/lttng-test-filter.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
}
],
'timeout': 7200
})
return command
-def get_baremetal_tests_cmd():
+def get_baremetal_tests_cmd(build_id):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/perf-tests.yml'
+ 'testdef': 'lava/system-tests/perf-tests.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
}
],
'timeout': 3600
})
return command
-def get_kvm_tests_cmd():
+def get_kvm_tests_cmd(build_id):
command = OrderedDict({
'command': 'lava_test_shell',
'parameters': {
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/kernel-tests.yml'
+ 'testdef': 'lava/system-tests/kernel-tests.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
},
{
'git-repo': 'https://github.com/lttng/lttng-ci.git',
'revision': 'master',
- 'testdef': 'lava/system-tests/destructive-tests.yml'
+ 'testdef': 'lava/system-tests/destructive-tests.yml',
+ 'parameters': { 'JENKINS_BUILD_ID': build_id }
}
],
'timeout': 7200
parser.add_argument('-k', '--kernel', required=True)
parser.add_argument('-lm', '--lmodule', required=True)
parser.add_argument('-tc', '--tools-commit', required=True)
+ parser.add_argument('-id', '--build-id', required=True)
parser.add_argument('-uc', '--ust-commit', required=False)
args = parser.parse_args()
if test_type is TestType.baremetal_benchmarks:
j['actions'].append(get_config_cmd('x86'))
j['actions'].append(get_env_setup_cmd('x86', args.tools_commit))
- j['actions'].append(get_baremetal_benchmarks_cmd())
+ j['actions'].append(get_baremetal_benchmarks_cmd(args.build_id))
j['actions'].append(get_results_cmd(stream_name='benchmark-kernel'))
elif test_type is TestType.baremetal_tests:
if args.ust_commit is None:
return -1
j['actions'].append(get_config_cmd('x86'))
j['actions'].append(get_env_setup_cmd('x86', args.tools_commit, args.ust_commit))
- j['actions'].append(get_baremetal_tests_cmd())
+ j['actions'].append(get_baremetal_tests_cmd(args.build_id))
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
elif test_type is TestType.kvm_tests:
if args.ust_commit is None:
return -1
j['actions'].append(get_config_cmd('kvm'))
j['actions'].append(get_env_setup_cmd('kvm', args.tools_commit, args.ust_commit))
- j['actions'].append(get_kvm_tests_cmd())
+ j['actions'].append(get_kvm_tests_cmd(args.build_id))
j['actions'].append(get_results_cmd(stream_name='tests-kernel'))
elif test_type is TestType.kvm_fuzzing_tests:
if args.ust_commit is None:
if test_type is TestType.kvm_tests or test_type is TestType.baremetal_tests:
print_test_output(server, jobid)
elif test_type is TestType.baremetal_benchmarks:
- fetch_benchmark_results(server, jobid)
+ fetch_benchmark_results(args.build_id)
print('Job ended with {} status.'.format(jobstatus))
if jobstatus not in 'Complete':
parser.add_argument('-k', '--kernel', required=True)
parser.add_argument('-lm', '--lmodule', required=True)
parser.add_argument('-tc', '--tools-commit', required=True)
+ parser.add_argument('-id', '--build-id', required=True)
parser.add_argument('-uc', '--ust-commit', required=False)
parser.add_argument('-d', '--debug', required=False, action='store_true')
args = parser.parse_args()
context['kernel_url'] = args.kernel
context['nfsrootfs_url'] = nfsrootfs
context['lttng_modules_url'] = args.lmodule
+ context['jenkins_build_id'] = args.build_id
context['kprobe_round_nb'] = 10
-k "$S3_URL_KERNEL_IMAGE" \
-lm "$S3_URL_LTTNG_MODULES" \
-tc "$LTTNG_TOOLS_COMMIT_ID" \
+ -id "$BUILD_TAG" \
--debug
python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
-lm "$S3_URL_LTTNG_MODULES" \
-tc "$LTTNG_TOOLS_COMMIT_ID" \
-uc "$LTTNG_UST_COMMIT_ID" \
+ -id "$BUILD_TAG" \
--debug
python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
-lm "$S3_URL_LTTNG_MODULES" \
-tc "$LTTNG_TOOLS_COMMIT_ID" \
-uc "$LTTNG_UST_COMMIT_ID" \
+ -id "$BUILD_TAG" \
--debug
python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
-lm "$S3_URL_LTTNG_MODULES" \
-tc "$LTTNG_TOOLS_COMMIT_ID" \
-uc "$LTTNG_UST_COMMIT_ID" \
+ -id "$BUILD_TAG" \
--debug
python -u "$LTTNG_CI_PATH"/scripts/system-tests/lava-submit.py \
from: git
path: lava/system-tests/failing-close.yml
name: failing-close
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/failing-ioctl.yml
name: failing-ioctl
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/failing-open-efault.yml
name: failing-open-efault
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/success-dup-close.yml
name: success-dup-close
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/raw-syscall-getpid.yml
name: raw-syscall-getpid
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/failing-open-enoent.yml
name: failing-open-enoent
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/lttng-test-filter.yml
name: lttng-test-filter
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
{% elif test_type == TestType.baremetal_tests %}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/perf-tests.yml
name: perf-tests
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
{% elif test_type == TestType.kvm_tests %}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/kernel-tests.yml
name: kernel-tests
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
- repository: https://github.com/lttng/lttng-ci.git
from: git
path: lava/system-tests/destructive-tests.yml
name: destructive-tests
+ params:
+ JENKINS_BUILD_ID: {{ jenkins_build_id }}
{% elif test_type == TestType.kvm_fuzzing_tests %}
- repository: https://github.com/lttng/lttng-ci.git
from: git